DFGSpeculativeJIT's m_slowPathLambdas should restore the current node field and DFG...
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGMayExit.h"
37 #include "DFGOSRExitFuzz.h"
38 #include "DFGSaneStringGetByValSlowPathGenerator.h"
39 #include "DFGSlowPathGenerator.h"
40 #include "DirectArguments.h"
41 #include "JITAddGenerator.h"
42 #include "JITBitAndGenerator.h"
43 #include "JITBitOrGenerator.h"
44 #include "JITBitXorGenerator.h"
45 #include "JITDivGenerator.h"
46 #include "JITLeftShiftGenerator.h"
47 #include "JITMulGenerator.h"
48 #include "JITRightShiftGenerator.h"
49 #include "JITSubGenerator.h"
50 #include "JSCInlines.h"
51 #include "JSEnvironmentRecord.h"
52 #include "JSGeneratorFunction.h"
53 #include "JSLexicalEnvironment.h"
54 #include "LinkBuffer.h"
55 #include "RegExpConstructor.h"
56 #include "ScopedArguments.h"
57 #include "ScratchRegisterAllocator.h"
58 #include "WriteBarrierBuffer.h"
59 #include <wtf/MathExtras.h>
60
61 namespace JSC { namespace DFG {
62
63 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
64     : m_compileOkay(true)
65     , m_jit(jit)
66     , m_currentNode(0)
67     , m_lastGeneratedNode(LastNodeType)
68     , m_indexInBlock(0)
69     , m_generationInfo(m_jit.graph().frameRegisterCount())
70     , m_state(m_jit.graph())
71     , m_interpreter(m_jit.graph(), m_state)
72     , m_stream(&jit.jitCode()->variableEventStream)
73     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
74 {
75 }
76
77 SpeculativeJIT::~SpeculativeJIT()
78 {
79 }
80
81 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
82 {
83     IndexingType indexingType = structure->indexingType();
84     bool hasIndexingHeader = hasIndexedProperties(indexingType);
85
86     unsigned inlineCapacity = structure->inlineCapacity();
87     unsigned outOfLineCapacity = structure->outOfLineCapacity();
88     
89     GPRTemporary scratch(this);
90     GPRTemporary scratch2(this);
91     GPRReg scratchGPR = scratch.gpr();
92     GPRReg scratch2GPR = scratch2.gpr();
93
94     ASSERT(vectorLength >= numElements);
95     vectorLength = std::max(BASE_VECTOR_LEN, vectorLength);
96     
97     JITCompiler::JumpList slowCases;
98
99     size_t size = 0;
100     if (hasIndexingHeader)
101         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
102     size += outOfLineCapacity * sizeof(JSValue);
103
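    // If any butterfly storage is needed, allocate it and adjust storageGPR so that it
    // holds the Butterfly pointer, which by convention points just past the IndexingHeader
    // (at element 0 of the indexed storage); otherwise use a null storage pointer.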
104     if (size) {
105         slowCases.append(
106             emitAllocateBasicStorage(TrustedImm32(size), storageGPR));
107         if (hasIndexingHeader)
108             m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
109         else
110             m_jit.addPtr(TrustedImm32(sizeof(IndexingHeader)), storageGPR);
111     } else
112         m_jit.move(TrustedImmPtr(0), storageGPR);
113
114     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
115     MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
116     m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
117     emitAllocateJSObject(resultGPR, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
118
119     if (hasIndexingHeader)
120         m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
121
122     // I want a slow path that also loads out the storage pointer, and that's
123     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
124     // of work for a very small piece of functionality. :-/
125     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
126         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
127         structure, vectorLength));
128
129     if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
130 #if USE(JSVALUE64)
131         m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
132         for (unsigned i = numElements; i < vectorLength; ++i)
133             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
134 #else
135         EncodedValueDescriptor value;
136         value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
137         for (unsigned i = numElements; i < vectorLength; ++i) {
138             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
139             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
140         }
141 #endif
142     }
143     
144     if (hasIndexingHeader)
145         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
146 }
147
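// When the call frame is inlined and not varargs, the argument count is known at compile
// time; otherwise it is loaded from the ArgumentCount slot of the machine frame or of the
// inlined frame.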
148 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
149 {
150     if (inlineCallFrame && !inlineCallFrame->isVarargs())
151         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
152     else {
153         VirtualRegister argumentCountRegister;
154         if (!inlineCallFrame)
155             argumentCountRegister = VirtualRegister(JSStack::ArgumentCount);
156         else
157             argumentCountRegister = inlineCallFrame->argumentCountRegister;
158         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
159         if (!includeThis)
160             m_jit.sub32(TrustedImm32(1), lengthGPR);
161     }
162 }
163
164 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
165 {
166     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
167 }
168
169 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
170 {
171     if (origin.inlineCallFrame) {
172         if (origin.inlineCallFrame->isClosureCall) {
173             m_jit.loadPtr(
174                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
175                 calleeGPR);
176         } else {
177             m_jit.move(
178                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
179                 calleeGPR);
180         }
181     } else
182         m_jit.loadPtr(JITCompiler::addressFor(JSStack::Callee), calleeGPR);
183 }
184
185 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
186 {
187     m_jit.addPtr(
188         TrustedImm32(
189             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
190         GPRInfo::callFrameRegister, startGPR);
191 }
192
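// OSR exit fuzzing: count every emitted exit check and, once the configured threshold is
// reached, return a jump that forces the exit to be taken. The returned Jump is unset when
// fuzzing is disabled.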
193 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
194 {
195     if (!doOSRExitFuzzing())
196         return MacroAssembler::Jump();
197     
198     MacroAssembler::Jump result;
199     
200     m_jit.pushToSave(GPRInfo::regT0);
201     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
202     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
203     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
204     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
205     unsigned at = Options::fireOSRExitFuzzAt();
206     if (at || atOrAfter) {
207         unsigned threshold;
208         MacroAssembler::RelationalCondition condition;
209         if (atOrAfter) {
210             threshold = atOrAfter;
211             condition = MacroAssembler::Below;
212         } else {
213             threshold = at;
214             condition = MacroAssembler::NotEqual;
215         }
216         MacroAssembler::Jump ok = m_jit.branch32(
217             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
218         m_jit.popToRestore(GPRInfo::regT0);
219         result = m_jit.jump();
220         ok.link(&m_jit);
221     }
222     m_jit.popToRestore(GPRInfo::regT0);
223     
224     return result;
225 }
226
227 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
228 {
229     if (!m_compileOkay)
230         return;
231     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
232     if (fuzzJump.isSet()) {
233         JITCompiler::JumpList jumpsToFail;
234         jumpsToFail.append(fuzzJump);
235         jumpsToFail.append(jumpToFail);
236         m_jit.appendExitInfo(jumpsToFail);
237     } else
238         m_jit.appendExitInfo(jumpToFail);
239     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
240 }
241
242 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
243 {
244     if (!m_compileOkay)
245         return;
246     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
247     if (fuzzJump.isSet()) {
248         JITCompiler::JumpList myJumpsToFail;
249         myJumpsToFail.append(jumpsToFail);
250         myJumpsToFail.append(fuzzJump);
251         m_jit.appendExitInfo(myJumpsToFail);
252     } else
253         m_jit.appendExitInfo(jumpsToFail);
254     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
255 }
256
257 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
258 {
259     if (!m_compileOkay)
260         return OSRExitJumpPlaceholder();
261     unsigned index = m_jit.jitCode()->osrExit.size();
262     m_jit.appendExitInfo();
263     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
264     return OSRExitJumpPlaceholder(index);
265 }
266
267 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
268 {
269     return speculationCheck(kind, jsValueSource, nodeUse.node());
270 }
271
272 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
273 {
274     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
275 }
276
277 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
278 {
279     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
280 }
281
282 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
283 {
284     if (!m_compileOkay)
285         return;
286     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
287     m_jit.appendExitInfo(jumpToFail);
288     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
289 }
290
291 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
292 {
293     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
294 }
295
296 void SpeculativeJIT::emitInvalidationPoint(Node* node)
297 {
298     if (!m_compileOkay)
299         return;
300     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
301     m_jit.jitCode()->appendOSRExit(OSRExit(
302         UncountableInvalidation, JSValueSource(),
303         m_jit.graph().methodOfGettingAValueProfileFor(node),
304         this, m_stream->size()));
305     info.m_replacementSource = m_jit.watchpointLabel();
306     ASSERT(info.m_replacementSource.isSet());
307     noResult(node);
308 }
309
310 void SpeculativeJIT::unreachable(Node* node)
311 {
312     m_compileOkay = false;
313     m_jit.abortWithReason(DFGUnreachableNode, node->op());
314 }
315
316 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
317 {
318     if (!m_compileOkay)
319         return;
320     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
321     m_compileOkay = false;
322     if (verboseCompilationEnabled())
323         dataLog("Bailing compilation.\n");
324 }
325
326 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
327 {
328     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
329 }
330
331 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
332 {
333     ASSERT(needsTypeCheck(edge, typesPassedThrough));
334     m_interpreter.filter(edge, typesPassedThrough);
335     speculationCheck(exitKind, source, edge.node(), jumpToFail);
336 }
337
338 RegisterSet SpeculativeJIT::usedRegisters()
339 {
340     RegisterSet result;
341     
342     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
343         GPRReg gpr = GPRInfo::toRegister(i);
344         if (m_gprs.isInUse(gpr))
345             result.set(gpr);
346     }
347     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
348         FPRReg fpr = FPRInfo::toRegister(i);
349         if (m_fprs.isInUse(fpr))
350             result.set(fpr);
351     }
352     
353     result.merge(RegisterSet::stubUnavailableRegisters());
354     
355     return result;
356 }
357
358 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
359 {
360     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
361 }
362
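// Lambda-based slow paths are remembered along with the node that was current when they
// were added, so that runSlowPathGenerators() can restore m_currentNode (and hence the
// code origin recorded in the PC-to-CodeOrigin map) before running them.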
363 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
364 {
365     m_slowPathLambdas.append(std::make_pair(lambda, m_currentNode));
366 }
367
368 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
369 {
370     for (auto& slowPathGenerator : m_slowPathGenerators) {
371         pcToCodeOriginMapBuilder.appendItem(m_jit.label(), slowPathGenerator->origin().semantic);
372         slowPathGenerator->generate(this);
373     }
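    // Restore the node that was current when each lambda was registered before emitting
    // its code, so that the recorded code origin matches.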
374     for (auto& generatorPair : m_slowPathLambdas) {
375         Node* currentNode = generatorPair.second;
376         m_currentNode = currentNode;
377         pcToCodeOriginMapBuilder.appendItem(m_jit.label(), currentNode->origin.semantic);
378         generatorPair.first();
379     }
380 }
381
382 void SpeculativeJIT::clearGenerationInfo()
383 {
384     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
385         m_generationInfo[i] = GenerationInfo();
386     m_gprs = RegisterBank<GPRInfo>();
387     m_fprs = RegisterBank<FPRInfo>();
388 }
389
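// A silent spill/fill plan records how to save a live register around a call and how to
// reconstitute its value afterwards (reloading it from the stack or rematerializing a
// constant) without disturbing the recorded generation info.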
390 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
391 {
392     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
393     Node* node = info.node();
394     DataFormat registerFormat = info.registerFormat();
395     ASSERT(registerFormat != DataFormatNone);
396     ASSERT(registerFormat != DataFormatDouble);
397         
398     SilentSpillAction spillAction;
399     SilentFillAction fillAction;
400         
401     if (!info.needsSpill())
402         spillAction = DoNothingForSpill;
403     else {
404 #if USE(JSVALUE64)
405         ASSERT(info.gpr() == source);
406         if (registerFormat == DataFormatInt32)
407             spillAction = Store32Payload;
408         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
409             spillAction = StorePtr;
410         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
411             spillAction = Store64;
412         else {
413             ASSERT(registerFormat & DataFormatJS);
414             spillAction = Store64;
415         }
416 #elif USE(JSVALUE32_64)
417         if (registerFormat & DataFormatJS) {
418             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
419             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
420         } else {
421             ASSERT(info.gpr() == source);
422             spillAction = Store32Payload;
423         }
424 #endif
425     }
426         
427     if (registerFormat == DataFormatInt32) {
428         ASSERT(info.gpr() == source);
429         ASSERT(isJSInt32(info.registerFormat()));
430         if (node->hasConstant()) {
431             ASSERT(node->isInt32Constant());
432             fillAction = SetInt32Constant;
433         } else
434             fillAction = Load32Payload;
435     } else if (registerFormat == DataFormatBoolean) {
436 #if USE(JSVALUE64)
437         RELEASE_ASSERT_NOT_REACHED();
438 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
439         fillAction = DoNothingForFill;
440 #endif
441 #elif USE(JSVALUE32_64)
442         ASSERT(info.gpr() == source);
443         if (node->hasConstant()) {
444             ASSERT(node->isBooleanConstant());
445             fillAction = SetBooleanConstant;
446         } else
447             fillAction = Load32Payload;
448 #endif
449     } else if (registerFormat == DataFormatCell) {
450         ASSERT(info.gpr() == source);
451         if (node->hasConstant()) {
452             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
453             node->asCell(); // To get the assertion.
454             fillAction = SetCellConstant;
455         } else {
456 #if USE(JSVALUE64)
457             fillAction = LoadPtr;
458 #else
459             fillAction = Load32Payload;
460 #endif
461         }
462     } else if (registerFormat == DataFormatStorage) {
463         ASSERT(info.gpr() == source);
464         fillAction = LoadPtr;
465     } else if (registerFormat == DataFormatInt52) {
466         if (node->hasConstant())
467             fillAction = SetInt52Constant;
468         else if (info.spillFormat() == DataFormatInt52)
469             fillAction = Load64;
470         else if (info.spillFormat() == DataFormatStrictInt52)
471             fillAction = Load64ShiftInt52Left;
472         else if (info.spillFormat() == DataFormatNone)
473             fillAction = Load64;
474         else {
475             RELEASE_ASSERT_NOT_REACHED();
476 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
477             fillAction = Load64; // Make GCC happy.
478 #endif
479         }
480     } else if (registerFormat == DataFormatStrictInt52) {
481         if (node->hasConstant())
482             fillAction = SetStrictInt52Constant;
483         else if (info.spillFormat() == DataFormatInt52)
484             fillAction = Load64ShiftInt52Right;
485         else if (info.spillFormat() == DataFormatStrictInt52)
486             fillAction = Load64;
487         else if (info.spillFormat() == DataFormatNone)
488             fillAction = Load64;
489         else {
490             RELEASE_ASSERT_NOT_REACHED();
491 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
492             fillAction = Load64; // Make GCC happy.
493 #endif
494         }
495     } else {
496         ASSERT(registerFormat & DataFormatJS);
497 #if USE(JSVALUE64)
498         ASSERT(info.gpr() == source);
499         if (node->hasConstant()) {
500             if (node->isCellConstant())
501                 fillAction = SetTrustedJSConstant;
502             else
503                 fillAction = SetJSConstant;
504         } else if (info.spillFormat() == DataFormatInt32) {
505             ASSERT(registerFormat == DataFormatJSInt32);
506             fillAction = Load32PayloadBoxInt;
507         } else
508             fillAction = Load64;
509 #else
510         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
511         if (node->hasConstant())
512             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
513         else if (info.payloadGPR() == source)
514             fillAction = Load32Payload;
515         else { // Fill the Tag
516             switch (info.spillFormat()) {
517             case DataFormatInt32:
518                 ASSERT(registerFormat == DataFormatJSInt32);
519                 fillAction = SetInt32Tag;
520                 break;
521             case DataFormatCell:
522                 ASSERT(registerFormat == DataFormatJSCell);
523                 fillAction = SetCellTag;
524                 break;
525             case DataFormatBoolean:
526                 ASSERT(registerFormat == DataFormatJSBoolean);
527                 fillAction = SetBooleanTag;
528                 break;
529             default:
530                 fillAction = Load32Tag;
531                 break;
532             }
533         }
534 #endif
535     }
536         
537     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
538 }
539     
540 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
541 {
542     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
543     Node* node = info.node();
544     ASSERT(info.registerFormat() == DataFormatDouble);
545
546     SilentSpillAction spillAction;
547     SilentFillAction fillAction;
548         
549     if (!info.needsSpill())
550         spillAction = DoNothingForSpill;
551     else {
552         ASSERT(!node->hasConstant());
553         ASSERT(info.spillFormat() == DataFormatNone);
554         ASSERT(info.fpr() == source);
555         spillAction = StoreDouble;
556     }
557         
558 #if USE(JSVALUE64)
559     if (node->hasConstant()) {
560         node->asNumber(); // To get the assertion.
561         fillAction = SetDoubleConstant;
562     } else {
563         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
564         fillAction = LoadDouble;
565     }
566 #elif USE(JSVALUE32_64)
567     ASSERT(info.registerFormat() == DataFormatDouble);
568     if (node->hasConstant()) {
569         node->asNumber(); // To get the assertion.
570         fillAction = SetDoubleConstant;
571     } else
572         fillAction = LoadDouble;
573 #endif
574
575     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
576 }
577     
578 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
579 {
580     switch (plan.spillAction()) {
581     case DoNothingForSpill:
582         break;
583     case Store32Tag:
584         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
585         break;
586     case Store32Payload:
587         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
588         break;
589     case StorePtr:
590         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
591         break;
592 #if USE(JSVALUE64)
593     case Store64:
594         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
595         break;
596 #endif
597     case StoreDouble:
598         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
599         break;
600     default:
601         RELEASE_ASSERT_NOT_REACHED();
602     }
603 }
604     
605 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
606 {
607 #if USE(JSVALUE32_64)
608     UNUSED_PARAM(canTrample);
609 #endif
610     switch (plan.fillAction()) {
611     case DoNothingForFill:
612         break;
613     case SetInt32Constant:
614         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
615         break;
616 #if USE(JSVALUE64)
617     case SetInt52Constant:
618         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
619         break;
620     case SetStrictInt52Constant:
621         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
622         break;
623 #endif // USE(JSVALUE64)
624     case SetBooleanConstant:
625         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
626         break;
627     case SetCellConstant:
628         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
629         break;
630 #if USE(JSVALUE64)
631     case SetTrustedJSConstant:
632         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
633         break;
634     case SetJSConstant:
635         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
636         break;
637     case SetDoubleConstant:
638         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
639         m_jit.move64ToDouble(canTrample, plan.fpr());
640         break;
641     case Load32PayloadBoxInt:
642         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
643         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
644         break;
645     case Load32PayloadConvertToInt52:
646         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
647         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
648         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
649         break;
650     case Load32PayloadSignExtend:
651         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
652         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
653         break;
654 #else
655     case SetJSConstantTag:
656         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
657         break;
658     case SetJSConstantPayload:
659         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
660         break;
661     case SetInt32Tag:
662         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
663         break;
664     case SetCellTag:
665         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
666         break;
667     case SetBooleanTag:
668         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
669         break;
670     case SetDoubleConstant:
671         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
672         break;
673 #endif
674     case Load32Tag:
675         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
676         break;
677     case Load32Payload:
678         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
679         break;
680     case LoadPtr:
681         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
682         break;
683 #if USE(JSVALUE64)
684     case Load64:
685         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
686         break;
687     case Load64ShiftInt52Right:
688         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
689         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
690         break;
691     case Load64ShiftInt52Left:
692         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
693         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
694         break;
695 #endif
696     case LoadDouble:
697         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
698         break;
699     default:
700         RELEASE_ASSERT_NOT_REACHED();
701     }
702 }
703     
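// tempGPR holds the cell's indexing type byte. Mask it down to the bits the array class
// cares about and branch to the slow path if it does not match the expected shape.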
704 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
705 {
706     switch (arrayMode.arrayClass()) {
707     case Array::OriginalArray: {
708         CRASH();
709 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
710         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
711         return result;
712 #endif
713     }
714         
715     case Array::Array:
716         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
717         return m_jit.branch32(
718             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
719         
720     case Array::NonArray:
721     case Array::OriginalNonArray:
722         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
723         return m_jit.branch32(
724             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
725         
726     case Array::PossiblyArray:
727         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
728         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
729     }
730     
731     RELEASE_ASSERT_NOT_REACHED();
732     return JITCompiler::Jump();
733 }
734
735 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
736 {
737     JITCompiler::JumpList result;
738     
739     switch (arrayMode.type()) {
740     case Array::Int32:
741         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
742
743     case Array::Double:
744         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
745
746     case Array::Contiguous:
747         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
748
749     case Array::Undecided:
750         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
751
752     case Array::ArrayStorage:
753     case Array::SlowPutArrayStorage: {
754         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
755         
756         if (arrayMode.isJSArray()) {
757             if (arrayMode.isSlowPut()) {
758                 result.append(
759                     m_jit.branchTest32(
760                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
761                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
762                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
763                 result.append(
764                     m_jit.branch32(
765                         MacroAssembler::Above, tempGPR,
766                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
767                 break;
768             }
769             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
770             result.append(
771                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
772             break;
773         }
774         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
775         if (arrayMode.isSlowPut()) {
776             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
777             result.append(
778                 m_jit.branch32(
779                     MacroAssembler::Above, tempGPR,
780                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
781             break;
782         }
783         result.append(
784             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
785         break;
786     }
787     default:
788         CRASH();
789         break;
790     }
791     
792     return result;
793 }
794
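// CheckArray never converts the object; it only emits the indexing-type or cell-type check
// that the already-decided array mode relies on, OSR-exiting on a mismatch.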
795 void SpeculativeJIT::checkArray(Node* node)
796 {
797     ASSERT(node->arrayMode().isSpecific());
798     ASSERT(!node->arrayMode().doesConversion());
799     
800     SpeculateCellOperand base(this, node->child1());
801     GPRReg baseReg = base.gpr();
802     
803     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
804         noResult(m_currentNode);
805         return;
806     }
807     
808     const ClassInfo* expectedClassInfo = 0;
809     
810     switch (node->arrayMode().type()) {
811     case Array::AnyTypedArray:
812     case Array::String:
813         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
814         break;
815     case Array::Int32:
816     case Array::Double:
817     case Array::Contiguous:
818     case Array::Undecided:
819     case Array::ArrayStorage:
820     case Array::SlowPutArrayStorage: {
821         GPRTemporary temp(this);
822         GPRReg tempGPR = temp.gpr();
823         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
824         speculationCheck(
825             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
826             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
827         
828         noResult(m_currentNode);
829         return;
830     }
831     case Array::DirectArguments:
832         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
833         noResult(m_currentNode);
834         return;
835     case Array::ScopedArguments:
836         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
837         noResult(m_currentNode);
838         return;
839     default:
840         speculateCellTypeWithoutTypeFiltering(
841             node->child1(), baseReg,
842             typeForTypedArrayType(node->arrayMode().typedArrayType()));
843         noResult(m_currentNode);
844         return;
845     }
846     
847     RELEASE_ASSERT(expectedClassInfo);
848     
849     GPRTemporary temp(this);
850     GPRTemporary temp2(this);
851     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
852     speculationCheck(
853         BadType, JSValueSource::unboxedCell(baseReg), node,
854         m_jit.branchPtr(
855             MacroAssembler::NotEqual,
856             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
857             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
858     
859     noResult(m_currentNode);
860 }
861
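// Unlike CheckArray, Arrayify is allowed to convert: if the object does not already have
// the wanted indexing type (or structure, for ArrayifyToStructure), the slow path converts
// it rather than OSR-exiting.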
862 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
863 {
864     ASSERT(node->arrayMode().doesConversion());
865     
866     GPRTemporary temp(this);
867     GPRTemporary structure;
868     GPRReg tempGPR = temp.gpr();
869     GPRReg structureGPR = InvalidGPRReg;
870     
871     if (node->op() != ArrayifyToStructure) {
872         GPRTemporary realStructure(this);
873         structure.adopt(realStructure);
874         structureGPR = structure.gpr();
875     }
876         
877     // We can skip all that comes next if we already have array storage.
878     MacroAssembler::JumpList slowPath;
879     
880     if (node->op() == ArrayifyToStructure) {
881         slowPath.append(m_jit.branchWeakStructure(
882             JITCompiler::NotEqual,
883             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
884             node->structure()));
885     } else {
886         m_jit.load8(
887             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
888         
889         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
890     }
891     
892     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
893         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
894     
895     noResult(m_currentNode);
896 }
897
898 void SpeculativeJIT::arrayify(Node* node)
899 {
900     ASSERT(node->arrayMode().isSpecific());
901     
902     SpeculateCellOperand base(this, node->child1());
903     
904     if (!node->child2()) {
905         arrayify(node, base.gpr(), InvalidGPRReg);
906         return;
907     }
908     
909     SpeculateInt32Operand property(this, node->child2());
910     
911     arrayify(node, base.gpr(), property.gpr());
912 }
913
914 GPRReg SpeculativeJIT::fillStorage(Edge edge)
915 {
916     VirtualRegister virtualRegister = edge->virtualRegister();
917     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
918     
919     switch (info.registerFormat()) {
920     case DataFormatNone: {
921         if (info.spillFormat() == DataFormatStorage) {
922             GPRReg gpr = allocate();
923             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
924             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
925             info.fillStorage(*m_stream, gpr);
926             return gpr;
927         }
928         
929         // Must be a cell; fill it as a cell and then return the pointer.
930         return fillSpeculateCell(edge);
931     }
932         
933     case DataFormatStorage: {
934         GPRReg gpr = info.gpr();
935         m_gprs.lock(gpr);
936         return gpr;
937     }
938         
939     default:
940         return fillSpeculateCell(edge);
941     }
942 }
943
944 void SpeculativeJIT::useChildren(Node* node)
945 {
946     if (node->flags() & NodeHasVarArgs) {
947         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
948             if (!!m_jit.graph().m_varArgChildren[childIdx])
949                 use(m_jit.graph().m_varArgChildren[childIdx]);
950         }
951     } else {
952         Edge child1 = node->child1();
953         if (!child1) {
954             ASSERT(!node->child2() && !node->child3());
955             return;
956         }
957         use(child1);
958         
959         Edge child2 = node->child2();
960         if (!child2) {
961             ASSERT(!node->child3());
962             return;
963         }
964         use(child2);
965         
966         Edge child3 = node->child3();
967         if (!child3)
968             return;
969         use(child3);
970     }
971 }
972
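// For CellUse the base is already known to be a cell, so the IC needs no extra check; for
// UntypedUse a not-cell branch is emitted and handed to the IC as its slow-path entry.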
973 void SpeculativeJIT::compileTryGetById(Node* node)
974 {
975     switch (node->child1().useKind()) {
976     case CellUse: {
977         SpeculateCellOperand base(this, node->child1());
978         JSValueRegsTemporary result(this, Reuse, base);
979
980         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
981         JSValueRegs resultRegs = result.regs();
982
983         base.use();
984
985         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::GetPure);
986
987         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
988         break;
989     }
990
991     case UntypedUse: {
992         JSValueOperand base(this, node->child1());
993         JSValueRegsTemporary result(this, Reuse, base);
994
995         JSValueRegs baseRegs = base.jsValueRegs();
996         JSValueRegs resultRegs = result.regs();
997
998         base.use();
999
1000         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1001
1002         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, DontSpill, AccessType::GetPure);
1003
1004         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1005         break;
1006     }
1007
1008     default:
1009         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1010         break;
1011     } 
1012 }
1013
1014 void SpeculativeJIT::compileIn(Node* node)
1015 {
1016     SpeculateCellOperand base(this, node->child2());
1017     GPRReg baseGPR = base.gpr();
1018     
1019     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
1020         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1021             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1022             
1023             GPRTemporary result(this);
1024             GPRReg resultGPR = result.gpr();
1025
1026             use(node->child1());
1027             
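            // Emit a patchable jump for the 'in' IC: the InRecord ties the jump, the done
            // label, the slow path call, and the stub info together so the IC can later be
            // repatched with a fast path.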
1028             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1029             MacroAssembler::Label done = m_jit.label();
1030             
1031             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1032             // we can cast it to const AtomicStringImpl* safely.
1033             auto slowPath = slowPathCall(
1034                 jump.m_jump, this, operationInOptimize,
1035                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1036                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1037             
1038             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1039             stubInfo->codeOrigin = node->origin.semantic;
1040             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1041             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1042 #if USE(JSVALUE32_64)
1043             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1044             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1045 #endif
1046             stubInfo->patch.usedRegisters = usedRegisters();
1047
1048             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1049             addSlowPathGenerator(WTFMove(slowPath));
1050
1051             base.use();
1052
1053             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1054             return;
1055         }
1056     }
1057
1058     JSValueOperand key(this, node->child1());
1059     JSValueRegs regs = key.jsValueRegs();
1060         
1061     GPRFlushedCallResult result(this);
1062     GPRReg resultGPR = result.gpr();
1063         
1064     base.use();
1065     key.use();
1066         
1067     flushRegisters();
1068     callOperation(
1069         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1070         baseGPR, regs);
1071     m_jit.exceptionCheck();
1072     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1073 }
1074
1075 void SpeculativeJIT::compileDeleteById(Node* node)
1076 {
1077     JSValueOperand value(this, node->child1());
1078     GPRFlushedCallResult result(this);
1079
1080     JSValueRegs valueRegs = value.jsValueRegs();
1081     GPRReg resultGPR = result.gpr();
1082
1083     value.use();
1084
1085     flushRegisters();
1086     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1087     m_jit.exceptionCheck();
1088
1089     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1090 }
1091
1092 void SpeculativeJIT::compileDeleteByVal(Node* node)
1093 {
1094     JSValueOperand base(this, node->child1());
1095     JSValueOperand key(this, node->child2());
1096     GPRFlushedCallResult result(this);
1097
1098     JSValueRegs baseRegs = base.jsValueRegs();
1099     JSValueRegs keyRegs = key.jsValueRegs();
1100     GPRReg resultGPR = result.gpr();
1101
1102     base.use();
1103     key.use();
1104
1105     flushRegisters();
1106     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1107     m_jit.exceptionCheck();
1108
1109     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1110 }
1111
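// If the compare feeds directly into the next node's branch, fuse them: emit a
// compare-and-branch, then advance m_indexInBlock and m_currentNode past the branch so it
// is not compiled a second time.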
1112 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1113 {
1114     unsigned branchIndexInBlock = detectPeepHoleBranch();
1115     if (branchIndexInBlock != UINT_MAX) {
1116         Node* branchNode = m_block->at(branchIndexInBlock);
1117
1118         ASSERT(node->adjustedRefCount() == 1);
1119         
1120         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1121     
1122         m_indexInBlock = branchIndexInBlock;
1123         m_currentNode = branchNode;
1124         
1125         return true;
1126     }
1127     
1128     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1129     
1130     return false;
1131 }
1132
1133 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1134 {
1135     unsigned branchIndexInBlock = detectPeepHoleBranch();
1136     if (branchIndexInBlock != UINT_MAX) {
1137         Node* branchNode = m_block->at(branchIndexInBlock);
1138
1139         ASSERT(node->adjustedRefCount() == 1);
1140         
1141         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1142     
1143         m_indexInBlock = branchIndexInBlock;
1144         m_currentNode = branchNode;
1145         
1146         return true;
1147     }
1148     
1149     nonSpeculativeNonPeepholeStrictEq(node, invert);
1150     
1151     return false;
1152 }
1153
1154 static const char* dataFormatString(DataFormat format)
1155 {
1156     // These values correspond to the DataFormat enum.
1157     const char* strings[] = {
1158         "[  ]",
1159         "[ i]",
1160         "[ d]",
1161         "[ c]",
1162         "Err!",
1163         "Err!",
1164         "Err!",
1165         "Err!",
1166         "[J ]",
1167         "[Ji]",
1168         "[Jd]",
1169         "[Jc]",
1170         "Err!",
1171         "Err!",
1172         "Err!",
1173         "Err!",
1174     };
1175     return strings[format];
1176 }
1177
1178 void SpeculativeJIT::dump(const char* label)
1179 {
1180     if (label)
1181         dataLogF("<%s>\n", label);
1182
1183     dataLogF("  gprs:\n");
1184     m_gprs.dump();
1185     dataLogF("  fprs:\n");
1186     m_fprs.dump();
1187     dataLogF("  VirtualRegisters:\n");
1188     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1189         GenerationInfo& info = m_generationInfo[i];
1190         if (info.alive())
1191             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1192         else
1193             dataLogF("    % 3d:[__][__]", i);
1194         if (info.registerFormat() == DataFormatDouble)
1195             dataLogF(":fpr%d\n", info.fpr());
1196         else if (info.registerFormat() != DataFormatNone
1197 #if USE(JSVALUE32_64)
1198             && !(info.registerFormat() & DataFormatJS)
1199 #endif
1200             ) {
1201             ASSERT(info.gpr() != InvalidGPRReg);
1202             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1203         } else
1204             dataLogF("\n");
1205     }
1206     if (label)
1207         dataLogF("</%s>\n", label);
1208 }
1209
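// GPRTemporary/FPRTemporary own a scratch register for their C++ lifetime; the Reuse
// variants recycle an operand's register when the current node is that operand's last use.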
1210 GPRTemporary::GPRTemporary()
1211     : m_jit(0)
1212     , m_gpr(InvalidGPRReg)
1213 {
1214 }
1215
1216 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1217     : m_jit(jit)
1218     , m_gpr(InvalidGPRReg)
1219 {
1220     m_gpr = m_jit->allocate();
1221 }
1222
1223 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1224     : m_jit(jit)
1225     , m_gpr(InvalidGPRReg)
1226 {
1227     m_gpr = m_jit->allocate(specific);
1228 }
1229
1230 #if USE(JSVALUE32_64)
1231 GPRTemporary::GPRTemporary(
1232     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1233     : m_jit(jit)
1234     , m_gpr(InvalidGPRReg)
1235 {
1236     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1237         m_gpr = m_jit->reuse(op1.gpr(which));
1238     else
1239         m_gpr = m_jit->allocate();
1240 }
1241 #endif // USE(JSVALUE32_64)
1242
1243 JSValueRegsTemporary::JSValueRegsTemporary() { }
1244
1245 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1246 #if USE(JSVALUE64)
1247     : m_gpr(jit)
1248 #else
1249     : m_payloadGPR(jit)
1250     , m_tagGPR(jit)
1251 #endif
1252 {
1253 }
1254
1255 #if USE(JSVALUE64)
1256 template<typename T>
1257 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1258     : m_gpr(jit, Reuse, operand)
1259 {
1260 }
1261 #else
1262 template<typename T>
1263 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1264 {
1265     if (resultWord == PayloadWord) {
1266         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1267         m_tagGPR = GPRTemporary(jit);
1268     } else {
1269         m_payloadGPR = GPRTemporary(jit);
1270         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1271     }
1272 }
1273 #endif
1274
1275 #if USE(JSVALUE64)
1276 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1277 {
1278     m_gpr = GPRTemporary(jit, Reuse, operand);
1279 }
1280 #else
1281 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1282 {
1283     if (jit->canReuse(operand.node())) {
1284         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1285         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1286     } else {
1287         m_payloadGPR = GPRTemporary(jit);
1288         m_tagGPR = GPRTemporary(jit);
1289     }
1290 }
1291 #endif
1292
1293 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1294
1295 JSValueRegs JSValueRegsTemporary::regs()
1296 {
1297 #if USE(JSVALUE64)
1298     return JSValueRegs(m_gpr.gpr());
1299 #else
1300     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1301 #endif
1302 }
1303
1304 void GPRTemporary::adopt(GPRTemporary& other)
1305 {
1306     ASSERT(!m_jit);
1307     ASSERT(m_gpr == InvalidGPRReg);
1308     ASSERT(other.m_jit);
1309     ASSERT(other.m_gpr != InvalidGPRReg);
1310     m_jit = other.m_jit;
1311     m_gpr = other.m_gpr;
1312     other.m_jit = 0;
1313     other.m_gpr = InvalidGPRReg;
1314 }
1315
1316 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1317     : m_jit(jit)
1318     , m_fpr(InvalidFPRReg)
1319 {
1320     m_fpr = m_jit->fprAllocate();
1321 }
1322
1323 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1324     : m_jit(jit)
1325     , m_fpr(InvalidFPRReg)
1326 {
1327     if (m_jit->canReuse(op1.node()))
1328         m_fpr = m_jit->reuse(op1.fpr());
1329     else
1330         m_fpr = m_jit->fprAllocate();
1331 }
1332
1333 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1334     : m_jit(jit)
1335     , m_fpr(InvalidFPRReg)
1336 {
1337     if (m_jit->canReuse(op1.node()))
1338         m_fpr = m_jit->reuse(op1.fpr());
1339     else if (m_jit->canReuse(op2.node()))
1340         m_fpr = m_jit->reuse(op2.fpr());
1341     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1342         m_fpr = m_jit->reuse(op1.fpr());
1343     else
1344         m_fpr = m_jit->fprAllocate();
1345 }
1346
1347 #if USE(JSVALUE32_64)
1348 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1349     : m_jit(jit)
1350     , m_fpr(InvalidFPRReg)
1351 {
1352     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1353         m_fpr = m_jit->reuse(op1.fpr());
1354     else
1355         m_fpr = m_jit->fprAllocate();
1356 }
1357 #endif
1358
1359 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1360 {
1361     BasicBlock* taken = branchNode->branchData()->taken.block;
1362     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1363
1364     if (taken == nextBlock()) {
1365         condition = MacroAssembler::invert(condition);
1366         std::swap(taken, notTaken);
1367     }
1368
1369     SpeculateDoubleOperand op1(this, node->child1());
1370     SpeculateDoubleOperand op2(this, node->child2());
1371     
1372     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1373     jump(notTaken);
1374 }
1375
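// Both operands are speculated to be objects. When the masquerades-as-undefined watchpoint
// is no longer valid, also OSR-exit if either operand has the MasqueradesAsUndefined
// type-info flag set.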
1376 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1377 {
1378     BasicBlock* taken = branchNode->branchData()->taken.block;
1379     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1380
1381     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1382     
1383     if (taken == nextBlock()) {
1384         condition = MacroAssembler::NotEqual;
1385         BasicBlock* tmp = taken;
1386         taken = notTaken;
1387         notTaken = tmp;
1388     }
1389
1390     SpeculateCellOperand op1(this, node->child1());
1391     SpeculateCellOperand op2(this, node->child2());
1392     
1393     GPRReg op1GPR = op1.gpr();
1394     GPRReg op2GPR = op2.gpr();
1395     
1396     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1397         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1398             speculationCheck(
1399                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1400         }
1401         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1402             speculationCheck(
1403                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1404         }
1405     } else {
1406         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1407             speculationCheck(
1408                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1409                 m_jit.branchIfNotObject(op1GPR));
1410         }
1411         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1412             m_jit.branchTest8(
1413                 MacroAssembler::NonZero, 
1414                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1415                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1416
1417         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1418             speculationCheck(
1419                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1420                 m_jit.branchIfNotObject(op2GPR));
1421         }
1422         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1423             m_jit.branchTest8(
1424                 MacroAssembler::NonZero, 
1425                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1426                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1427     }
1428
1429     branchPtr(condition, op1GPR, op2GPR, taken);
1430     jump(notTaken);
1431 }
1432
1433 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1434 {
1435     BasicBlock* taken = branchNode->branchData()->taken.block;
1436     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1437
1438     // The branch instruction will branch to the taken block.
1439     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1440     if (taken == nextBlock()) {
1441         condition = JITCompiler::invert(condition);
1442         BasicBlock* tmp = taken;
1443         taken = notTaken;
1444         notTaken = tmp;
1445     }
1446
1447     if (node->child1()->isInt32Constant()) {
1448         int32_t imm = node->child1()->asInt32();
1449         SpeculateBooleanOperand op2(this, node->child2());
1450         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1451     } else if (node->child2()->isInt32Constant()) {
1452         SpeculateBooleanOperand op1(this, node->child1());
1453         int32_t imm = node->child2()->asInt32();
1454         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1455     } else {
1456         SpeculateBooleanOperand op1(this, node->child1());
1457         SpeculateBooleanOperand op2(this, node->child2());
1458         branch32(condition, op1.gpr(), op2.gpr(), taken);
1459     }
1460
1461     jump(notTaken);
1462 }
1463
1464 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1465 {
1466     BasicBlock* taken = branchNode->branchData()->taken.block;
1467     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1468
1469     // The branch instruction will branch to the taken block.
1470     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1471     if (taken == nextBlock()) {
1472         condition = JITCompiler::invert(condition);
1473         BasicBlock* tmp = taken;
1474         taken = notTaken;
1475         notTaken = tmp;
1476     }
1477
1478     if (node->child1()->isInt32Constant()) {
1479         int32_t imm = node->child1()->asInt32();
1480         SpeculateInt32Operand op2(this, node->child2());
1481         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1482     } else if (node->child2()->isInt32Constant()) {
1483         SpeculateInt32Operand op1(this, node->child1());
1484         int32_t imm = node->child2()->asInt32();
1485         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1486     } else {
1487         SpeculateInt32Operand op1(this, node->child1());
1488         SpeculateInt32Operand op2(this, node->child2());
1489         branch32(condition, op1.gpr(), op2.gpr(), taken);
1490     }
1491
1492     jump(notTaken);
1493 }
1494
1495 // Returns true if the compare is fused with a subsequent branch.
1496 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1497 {
1498     // Fused compare & branch.
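         // For example (purely illustrative), source like `if (a < b) { ... }` typically produces
         // a CompareLess node immediately followed by a Branch on its result; fusing the two lets
         // us emit a single compare-and-branch instead of materializing a boolean first.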
1499     unsigned branchIndexInBlock = detectPeepHoleBranch();
1500     if (branchIndexInBlock != UINT_MAX) {
1501         Node* branchNode = m_block->at(branchIndexInBlock);
1502
1503         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1504         // so there can be no intervening nodes that also reference the compare.
1505         ASSERT(node->adjustedRefCount() == 1);
1506
1507         if (node->isBinaryUseKind(Int32Use))
1508             compilePeepHoleInt32Branch(node, branchNode, condition);
1509 #if USE(JSVALUE64)
1510         else if (node->isBinaryUseKind(Int52RepUse))
1511             compilePeepHoleInt52Branch(node, branchNode, condition);
1512 #endif // USE(JSVALUE64)
1513         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1514             // Use non-peephole comparison, for now.
1515             return false;
1516         } else if (node->isBinaryUseKind(DoubleRepUse))
1517             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1518         else if (node->op() == CompareEq) {
1519             if (node->isBinaryUseKind(BooleanUse))
1520                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1521             else if (node->isBinaryUseKind(SymbolUse))
1522                 compilePeepHoleSymbolEquality(node, branchNode);
1523             else if (node->isBinaryUseKind(ObjectUse))
1524                 compilePeepHoleObjectEquality(node, branchNode);
1525             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1526                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1527             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1528                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1529             else if (!needsTypeCheck(node->child1(), SpecOther))
1530                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1531             else if (!needsTypeCheck(node->child2(), SpecOther))
1532                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1533             else {
1534                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1535                 return true;
1536             }
1537         } else {
1538             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1539             return true;
1540         }
1541
1542         use(node->child1());
1543         use(node->child2());
1544         m_indexInBlock = branchIndexInBlock;
1545         m_currentNode = branchNode;
1546         return true;
1547     }
1548     return false;
1549 }
1550
1551 void SpeculativeJIT::noticeOSRBirth(Node* node)
1552 {
1553     if (!node->hasVirtualRegister())
1554         return;
1555     
1556     VirtualRegister virtualRegister = node->virtualRegister();
1557     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1558     
1559     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1560 }
1561
1562 void SpeculativeJIT::compileMovHint(Node* node)
1563 {
1564     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1565     
1566     Node* child = node->child1().node();
1567     noticeOSRBirth(child);
1568     
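         // Record in the variable event stream that the value produced by `child` now backs the
         // bytecode variable at node->unlinkedLocal(), so OSR exit can recover it from there.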
1569     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1570 }
1571
1572 void SpeculativeJIT::bail(AbortReason reason)
1573 {
1574     if (verboseCompilationEnabled())
1575         dataLog("Bailing compilation.\n");
1576     m_compileOkay = true;
1577     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1578     clearGenerationInfo();
1579 }
1580
1581 void SpeculativeJIT::compileCurrentBlock()
1582 {
1583     ASSERT(m_compileOkay);
1584     
1585     if (!m_block)
1586         return;
1587     
1588     ASSERT(m_block->isReachable);
1589     
1590     m_jit.blockHeads()[m_block->index] = m_jit.label();
1591
1592     if (!m_block->intersectionOfCFAHasVisited) {
1593         // Don't generate code for basic blocks that are unreachable according to CFA.
1594         // But to be sure that nobody has generated a jump to this block, drop in a
1595         // breakpoint here.
1596         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1597         return;
1598     }
1599
1600     m_stream->appendAndLog(VariableEvent::reset());
1601     
1602     m_jit.jitAssertHasValidCallFrame();
1603     m_jit.jitAssertTagsInPlace();
1604     m_jit.jitAssertArgumentCountSane();
1605
1606     m_state.reset();
1607     m_state.beginBasicBlock(m_block);
1608     
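         // Tell the variable event stream where each local that is live at the head of this block
         // currently resides and in what format, so OSR exit has a starting point for this block.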
1609     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1610         int operand = m_block->variablesAtHead.operandForIndex(i);
1611         Node* node = m_block->variablesAtHead[i];
1612         if (!node)
1613             continue; // No need to record dead SetLocal's.
1614         
1615         VariableAccessData* variable = node->variableAccessData();
1616         DataFormat format;
1617         if (!node->refCount())
1618             continue; // No need to record dead SetLocal's.
1619         format = dataFormatFor(variable->flushFormat());
1620         m_stream->appendAndLog(
1621             VariableEvent::setLocal(
1622                 VirtualRegister(operand),
1623                 variable->machineLocal(),
1624                 format));
1625     }
1626
1627     m_origin = NodeOrigin();
1628     
1629     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1630         m_currentNode = m_block->at(m_indexInBlock);
1631         
1632         // We may have hit a contradiction that the CFA was aware of but that the JIT
1633         // didn't cause directly.
1634         if (!m_state.isValid()) {
1635             bail(DFGBailedAtTopOfBlock);
1636             return;
1637         }
1638
1639         m_interpreter.startExecuting();
1640         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1641         m_jit.setForNode(m_currentNode);
1642         m_origin = m_currentNode->origin;
1643         if (validationEnabled())
1644             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1645         m_lastGeneratedNode = m_currentNode->op();
1646         
1647         ASSERT(m_currentNode->shouldGenerate());
1648         
1649         if (verboseCompilationEnabled()) {
1650             dataLogF(
1651                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1652                 (int)m_currentNode->index(),
1653                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1654             dataLog("\n");
1655         }
1656
1657         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1658             m_jit.jitReleaseAssertNoException();
1659
1660         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic);
1661
1662         compile(m_currentNode);
1663         
1664         if (belongsInMinifiedGraph(m_currentNode->op()))
1665             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1666         
1667 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1668         m_jit.clearRegisterAllocationOffsets();
1669 #endif
1670         
1671         if (!m_compileOkay) {
1672             bail(DFGBailedAtEndOfNode);
1673             return;
1674         }
1675         
1676         // Make sure that the abstract state is rematerialized for the next node.
1677         m_interpreter.executeEffects(m_indexInBlock);
1678     }
1679     
1680     // Perform the most basic verification that children have been used correctly.
1681     if (!ASSERT_DISABLED) {
1682         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1683             GenerationInfo& info = m_generationInfo[index];
1684             RELEASE_ASSERT(!info.alive());
1685         }
1686     }
1687 }
1688
1689 // If we are making type predictions about our arguments then
1690 // we need to check that they are correct on function entry.
1691 void SpeculativeJIT::checkArgumentTypes()
1692 {
1693     ASSERT(!m_currentNode);
1694     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1695
1696     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1697         Node* node = m_jit.graph().m_arguments[i];
1698         if (!node) {
1699             // The argument is dead. We don't do any checks for such arguments.
1700             continue;
1701         }
1702         
1703         ASSERT(node->op() == SetArgument);
1704         ASSERT(node->shouldGenerate());
1705
1706         VariableAccessData* variableAccessData = node->variableAccessData();
1707         FlushFormat format = variableAccessData->flushFormat();
1708         
1709         if (format == FlushedJSValue)
1710             continue;
1711         
1712         VirtualRegister virtualRegister = variableAccessData->local();
1713
1714         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1715         
1716 #if USE(JSVALUE64)
1717         switch (format) {
1718         case FlushedInt32: {
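                 // On 64-bit, a boxed int32 has all sixteen tag bits set, so it compares
                 // unsigned-greater-or-equal to tagTypeNumberRegister; anything below the tag is
                 // not an int32 and we OSR exit.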
1719             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1720             break;
1721         }
1722         case FlushedBoolean: {
1723             GPRTemporary temp(this);
1724             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
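                 // JSValue encodes false as ValueFalse and true as ValueFalse | 1, so after xoring
                 // with ValueFalse, any bit other than the lowest being set means the value was
                 // not a boolean.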
1725             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1726             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1727             break;
1728         }
1729         case FlushedCell: {
1730             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1731             break;
1732         }
1733         default:
1734             RELEASE_ASSERT_NOT_REACHED();
1735             break;
1736         }
1737 #else
1738         switch (format) {
1739         case FlushedInt32: {
1740             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1741             break;
1742         }
1743         case FlushedBoolean: {
1744             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1745             break;
1746         }
1747         case FlushedCell: {
1748             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1749             break;
1750         }
1751         default:
1752             RELEASE_ASSERT_NOT_REACHED();
1753             break;
1754         }
1755 #endif
1756     }
1757
1758     m_origin = NodeOrigin();
1759 }
1760
1761 bool SpeculativeJIT::compile()
1762 {
1763     checkArgumentTypes();
1764     
1765     ASSERT(!m_currentNode);
1766     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1767         m_jit.setForBlockIndex(blockIndex);
1768         m_block = m_jit.graph().block(blockIndex);
1769         compileCurrentBlock();
1770     }
1771     linkBranches();
1772     return true;
1773 }
1774
1775 void SpeculativeJIT::createOSREntries()
1776 {
1777     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1778         BasicBlock* block = m_jit.graph().block(blockIndex);
1779         if (!block)
1780             continue;
1781         if (!block->isOSRTarget)
1782             continue;
1783         
1784         // Currently we don't have OSR entry trampolines. We could add them
1785         // here if need be.
1786         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1787     }
1788 }
1789
1790 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1791 {
1792     unsigned osrEntryIndex = 0;
1793     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1794         BasicBlock* block = m_jit.graph().block(blockIndex);
1795         if (!block)
1796             continue;
1797         if (!block->isOSRTarget)
1798             continue;
1799         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1800     }
1801     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1802     
1803     if (verboseCompilationEnabled()) {
1804         DumpContext dumpContext;
1805         dataLog("OSR Entries:\n");
1806         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1807             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1808         if (!dumpContext.isEmpty())
1809             dumpContext.dump(WTF::dataFile());
1810     }
1811 }
1812
1813 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1814 {
1815     Edge child3 = m_jit.graph().varArgChild(node, 2);
1816     Edge child4 = m_jit.graph().varArgChild(node, 3);
1817
1818     ArrayMode arrayMode = node->arrayMode();
1819     
1820     GPRReg baseReg = base.gpr();
1821     GPRReg propertyReg = property.gpr();
1822     
1823     SpeculateDoubleOperand value(this, child3);
1824
1825     FPRReg valueReg = value.fpr();
1826     
1827     DFG_TYPE_CHECK(
1828         JSValueRegs(), child3, SpecFullRealNumber,
1829         m_jit.branchDouble(
1830             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1831     
1832     if (!m_compileOkay)
1833         return;
1834     
1835     StorageOperand storage(this, child4);
1836     GPRReg storageReg = storage.gpr();
1837
1838     if (node->op() == PutByValAlias) {
1839         // Store the value to the array.
1840         GPRReg propertyReg = property.gpr();
1841         FPRReg valueReg = value.fpr();
1842         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1843         
1844         noResult(m_currentNode);
1845         return;
1846     }
1847     
1848     GPRTemporary temporary;
1849     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1850
1851     MacroAssembler::Jump slowCase;
1852     
1853     if (arrayMode.isInBounds()) {
1854         speculationCheck(
1855             OutOfBounds, JSValueRegs(), 0,
1856             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1857     } else {
1858         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1859         
1860         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1861         
1862         if (!arrayMode.isOutOfBounds())
1863             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1864         
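             // The index is inside the allocated vector but past the public length, so bump the
             // public length to index + 1 before storing.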
1865         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1866         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1867         
1868         inBounds.link(&m_jit);
1869     }
1870     
1871     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1872
1873     base.use();
1874     property.use();
1875     value.use();
1876     storage.use();
1877     
1878     if (arrayMode.isOutOfBounds()) {
1879         addSlowPathGenerator(
1880             slowPathCall(
1881                 slowCase, this,
1882                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1883                 NoResult, baseReg, propertyReg, valueReg));
1884     }
1885
1886     noResult(m_currentNode, UseChildrenCalledExplicitly);
1887 }
1888
1889 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1890 {
1891     SpeculateCellOperand string(this, node->child1());
1892     SpeculateStrictInt32Operand index(this, node->child2());
1893     StorageOperand storage(this, node->child3());
1894
1895     GPRReg stringReg = string.gpr();
1896     GPRReg indexReg = index.gpr();
1897     GPRReg storageReg = storage.gpr();
1898     
1899     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1900
1901     // unsigned comparison so we can filter out negative indices and indices that are too large
1902     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1903
1904     GPRTemporary scratch(this);
1905     GPRReg scratchReg = scratch.gpr();
1906
1907     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
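         // scratchReg now points at the string's StringImpl; its flags tell us whether the
         // characters are stored as 8-bit or 16-bit units.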
1908
1909     // Load the character into scratchReg
1910     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1911
1912     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1913     JITCompiler::Jump cont8Bit = m_jit.jump();
1914
1915     is16Bit.link(&m_jit);
1916
1917     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1918
1919     cont8Bit.link(&m_jit);
1920
1921     int32Result(scratchReg, m_currentNode);
1922 }
1923
1924 void SpeculativeJIT::compileGetByValOnString(Node* node)
1925 {
1926     SpeculateCellOperand base(this, node->child1());
1927     SpeculateStrictInt32Operand property(this, node->child2());
1928     StorageOperand storage(this, node->child3());
1929     GPRReg baseReg = base.gpr();
1930     GPRReg propertyReg = property.gpr();
1931     GPRReg storageReg = storage.gpr();
1932
1933     GPRTemporary scratch(this);
1934     GPRReg scratchReg = scratch.gpr();
1935 #if USE(JSVALUE32_64)
1936     GPRTemporary resultTag;
1937     GPRReg resultTagReg = InvalidGPRReg;
1938     if (node->arrayMode().isOutOfBounds()) {
1939         GPRTemporary realResultTag(this);
1940         resultTag.adopt(realResultTag);
1941         resultTagReg = resultTag.gpr();
1942     }
1943 #endif
1944
1945     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1946
1947     // unsigned comparison so we can filter out negative indices and indices that are too large
1948     JITCompiler::Jump outOfBounds = m_jit.branch32(
1949         MacroAssembler::AboveOrEqual, propertyReg,
1950         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1951     if (node->arrayMode().isInBounds())
1952         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1953
1954     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1955
1956     // Load the character into scratchReg
1957     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1958
1959     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1960     JITCompiler::Jump cont8Bit = m_jit.jump();
1961
1962     is16Bit.link(&m_jit);
1963
1964     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1965
1966     JITCompiler::Jump bigCharacter =
1967         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1968
1969     // 8-bit string values don't need the isASCII check.
1970     cont8Bit.link(&m_jit);
1971
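         // Characters below 0x100 index into the VM's single-character string cache (scaled by
         // pointer size: shift by 2 on 32-bit, by 3 on 64-bit); larger characters are handled by
         // the operationSingleCharacterString slow path registered below.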
1972     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
1973     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
1974     m_jit.loadPtr(scratchReg, scratchReg);
1975
1976     addSlowPathGenerator(
1977         slowPathCall(
1978             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
1979
1980     if (node->arrayMode().isOutOfBounds()) {
1981 #if USE(JSVALUE32_64)
1982         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
1983 #endif
1984
1985         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
1986         bool prototypeChainIsSane = false;
1987         if (globalObject->stringPrototypeChainIsSane()) {
1988             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
1989             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
1990             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
1991             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
1992             // indexed properties either.
1993             // https://bugs.webkit.org/show_bug.cgi?id=144668
1994             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
1995             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
1996             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
1997         }
1998         if (prototypeChainIsSane) {
1999             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2000             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2001             
2002 #if USE(JSVALUE64)
2003             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2004                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2005 #else
2006             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2007                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2008                 baseReg, propertyReg));
2009 #endif
2010         } else {
2011 #if USE(JSVALUE64)
2012             addSlowPathGenerator(
2013                 slowPathCall(
2014                     outOfBounds, this, operationGetByValStringInt,
2015                     scratchReg, baseReg, propertyReg));
2016 #else
2017             addSlowPathGenerator(
2018                 slowPathCall(
2019                     outOfBounds, this, operationGetByValStringInt,
2020                     resultTagReg, scratchReg, baseReg, propertyReg));
2021 #endif
2022         }
2023         
2024 #if USE(JSVALUE64)
2025         jsValueResult(scratchReg, m_currentNode);
2026 #else
2027         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2028 #endif
2029     } else
2030         cellResult(scratchReg, m_currentNode);
2031 }
2032
2033 void SpeculativeJIT::compileFromCharCode(Node* node)
2034 {
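         // This is the single-argument String.fromCharCode() case: small char codes are served
         // from the single-character string cache below, and everything else (including a cache
         // miss) goes to a C++ operation.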
2035     Edge& child = node->child1();
2036     if (child.useKind() == UntypedUse) {
2037         JSValueOperand opr(this, child);
2038         JSValueRegs oprRegs = opr.jsValueRegs();
2039 #if USE(JSVALUE64)
2040         GPRTemporary result(this);
2041         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2042 #else
2043         GPRTemporary resultTag(this);
2044         GPRTemporary resultPayload(this);
2045         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2046 #endif
2047         flushRegisters();
2048         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2049         m_jit.exceptionCheck();
2050         
2051         jsValueResult(resultRegs, node);
2052         return;
2053     }
2054
2055     SpeculateStrictInt32Operand property(this, child);
2056     GPRReg propertyReg = property.gpr();
2057     GPRTemporary smallStrings(this);
2058     GPRTemporary scratch(this);
2059     GPRReg scratchReg = scratch.gpr();
2060     GPRReg smallStringsReg = smallStrings.gpr();
2061
2062     JITCompiler::JumpList slowCases;
2063     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2064     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2065     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2066
2067     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2068     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2069     cellResult(scratchReg, m_currentNode);
2070 }
2071
2072 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2073 {
2074     VirtualRegister virtualRegister = node->virtualRegister();
2075     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2076
2077     switch (info.registerFormat()) {
2078     case DataFormatStorage:
2079         RELEASE_ASSERT_NOT_REACHED();
2080
2081     case DataFormatBoolean:
2082     case DataFormatCell:
2083         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2084         return GeneratedOperandTypeUnknown;
2085
2086     case DataFormatNone:
2087     case DataFormatJSCell:
2088     case DataFormatJS:
2089     case DataFormatJSBoolean:
2090     case DataFormatJSDouble:
2091         return GeneratedOperandJSValue;
2092
2093     case DataFormatJSInt32:
2094     case DataFormatInt32:
2095         return GeneratedOperandInteger;
2096
2097     default:
2098         RELEASE_ASSERT_NOT_REACHED();
2099         return GeneratedOperandTypeUnknown;
2100     }
2101 }
2102
2103 void SpeculativeJIT::compileValueToInt32(Node* node)
2104 {
2105     switch (node->child1().useKind()) {
2106 #if USE(JSVALUE64)
2107     case Int52RepUse: {
2108         SpeculateStrictInt52Operand op1(this, node->child1());
2109         GPRTemporary result(this, Reuse, op1);
2110         GPRReg op1GPR = op1.gpr();
2111         GPRReg resultGPR = result.gpr();
2112         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2113         int32Result(resultGPR, node, DataFormatInt32);
2114         return;
2115     }
2116 #endif // USE(JSVALUE64)
2117         
2118     case DoubleRepUse: {
2119         GPRTemporary result(this);
2120         SpeculateDoubleOperand op1(this, node->child1());
2121         FPRReg fpr = op1.fpr();
2122         GPRReg gpr = result.gpr();
2123         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2124         
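             // If the hardware truncation fails (NaN, infinity, or out of int32 range), fall back
             // to the operationToInt32 slow path to perform the conversion.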
2125         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, operationToInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2126         
2127         int32Result(gpr, node);
2128         return;
2129     }
2130     
2131     case NumberUse:
2132     case NotCellUse: {
2133         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2134         case GeneratedOperandInteger: {
2135             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2136             GPRTemporary result(this, Reuse, op1);
2137             m_jit.move(op1.gpr(), result.gpr());
2138             int32Result(result.gpr(), node, op1.format());
2139             return;
2140         }
2141         case GeneratedOperandJSValue: {
2142             GPRTemporary result(this);
2143 #if USE(JSVALUE64)
2144             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2145
2146             GPRReg gpr = op1.gpr();
2147             GPRReg resultGpr = result.gpr();
2148             FPRTemporary tempFpr(this);
2149             FPRReg fpr = tempFpr.fpr();
2150
2151             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2152             JITCompiler::JumpList converted;
2153
2154             if (node->child1().useKind() == NumberUse) {
2155                 DFG_TYPE_CHECK(
2156                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2157                     m_jit.branchTest64(
2158                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2159             } else {
2160                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2161                 
2162                 DFG_TYPE_CHECK(
2163                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2164                 
2165                 // It's not a cell: so true turns into 1 and all else turns into 0.
2166                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2167                 converted.append(m_jit.jump());
2168                 
2169                 isNumber.link(&m_jit);
2170             }
2171
2172             // If we get here, we have a double encoded as a JSValue.
2173             unboxDouble(gpr, resultGpr, fpr);
2174
2175             silentSpillAllRegisters(resultGpr);
2176             callOperation(operationToInt32, resultGpr, fpr);
2177             silentFillAllRegisters(resultGpr);
2178
2179             converted.append(m_jit.jump());
2180
2181             isInteger.link(&m_jit);
2182             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2183
2184             converted.link(&m_jit);
2185 #else
2186             Node* childNode = node->child1().node();
2187             VirtualRegister virtualRegister = childNode->virtualRegister();
2188             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2189
2190             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2191
2192             GPRReg payloadGPR = op1.payloadGPR();
2193             GPRReg resultGpr = result.gpr();
2194         
2195             JITCompiler::JumpList converted;
2196
2197             if (info.registerFormat() == DataFormatJSInt32)
2198                 m_jit.move(payloadGPR, resultGpr);
2199             else {
2200                 GPRReg tagGPR = op1.tagGPR();
2201                 FPRTemporary tempFpr(this);
2202                 FPRReg fpr = tempFpr.fpr();
2203                 FPRTemporary scratch(this);
2204
2205                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2206
2207                 if (node->child1().useKind() == NumberUse) {
2208                     DFG_TYPE_CHECK(
2209                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2210                         m_jit.branch32(
2211                             MacroAssembler::AboveOrEqual, tagGPR,
2212                             TrustedImm32(JSValue::LowestTag)));
2213                 } else {
2214                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2215                     
2216                     DFG_TYPE_CHECK(
2217                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2218                         m_jit.branchIfCell(op1.jsValueRegs()));
2219                     
2220                     // It's not a cell: so true turns into 1 and all else turns into 0.
2221                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2222                     m_jit.move(TrustedImm32(0), resultGpr);
2223                     converted.append(m_jit.jump());
2224                     
2225                     isBoolean.link(&m_jit);
2226                     m_jit.move(payloadGPR, resultGpr);
2227                     converted.append(m_jit.jump());
2228                     
2229                     isNumber.link(&m_jit);
2230                 }
2231
2232                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2233
2234                 silentSpillAllRegisters(resultGpr);
2235                 callOperation(operationToInt32, resultGpr, fpr);
2236                 silentFillAllRegisters(resultGpr);
2237
2238                 converted.append(m_jit.jump());
2239
2240                 isInteger.link(&m_jit);
2241                 m_jit.move(payloadGPR, resultGpr);
2242
2243                 converted.link(&m_jit);
2244             }
2245 #endif
2246             int32Result(resultGpr, node);
2247             return;
2248         }
2249         case GeneratedOperandTypeUnknown:
2250             RELEASE_ASSERT(!m_compileOkay);
2251             return;
2252         }
2253         RELEASE_ASSERT_NOT_REACHED();
2254         return;
2255     }
2256     
2257     default:
2258         ASSERT(!m_compileOkay);
2259         return;
2260     }
2261 }
2262
2263 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2264 {
2265     if (doesOverflow(node->arithMode())) {
2266         if (enableInt52()) {
2267             SpeculateInt32Operand op1(this, node->child1());
2268             GPRTemporary result(this, Reuse, op1);
2269             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2270             strictInt52Result(result.gpr(), node);
2271             return;
2272         }
2273         SpeculateInt32Operand op1(this, node->child1());
2274         FPRTemporary result(this);
2275             
2276         GPRReg inputGPR = op1.gpr();
2277         FPRReg outputFPR = result.fpr();
2278             
2279         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2280             
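             // The input is an unsigned 32-bit value, but convertInt32ToDouble treated it as
             // signed; if the sign bit was set the result is off by exactly 2^32, so add it back.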
2281         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2282         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2283         positive.link(&m_jit);
2284             
2285         doubleResult(outputFPR, node);
2286         return;
2287     }
2288     
2289     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2290
2291     SpeculateInt32Operand op1(this, node->child1());
2292     GPRTemporary result(this);
2293
2294     m_jit.move(op1.gpr(), result.gpr());
2295
2296     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2297
2298     int32Result(result.gpr(), node, op1.format());
2299 }
2300
2301 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2302 {
2303     SpeculateDoubleOperand op1(this, node->child1());
2304     FPRTemporary scratch(this);
2305     GPRTemporary result(this);
2306     
2307     FPRReg valueFPR = op1.fpr();
2308     FPRReg scratchFPR = scratch.fpr();
2309     GPRReg resultGPR = result.gpr();
2310
2311     JITCompiler::JumpList failureCases;
2312     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2313     m_jit.branchConvertDoubleToInt32(
2314         valueFPR, resultGPR, failureCases, scratchFPR,
2315         shouldCheckNegativeZero(node->arithMode()));
2316     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2317
2318     int32Result(resultGPR, node);
2319 }
2320
2321 void SpeculativeJIT::compileDoubleRep(Node* node)
2322 {
2323     switch (node->child1().useKind()) {
2324     case RealNumberUse: {
2325         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2326         FPRTemporary result(this);
2327         
2328         JSValueRegs op1Regs = op1.jsValueRegs();
2329         FPRReg resultFPR = result.fpr();
2330         
2331 #if USE(JSVALUE64)
2332         GPRTemporary temp(this);
2333         GPRReg tempGPR = temp.gpr();
2334         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2335 #else
2336         FPRTemporary temp(this);
2337         FPRReg tempFPR = temp.fpr();
2338         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2339 #endif
2340         
2341         JITCompiler::Jump done = m_jit.branchDouble(
2342             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2343         
2344         DFG_TYPE_CHECK(
2345             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2346         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2347         
2348         done.link(&m_jit);
2349         
2350         doubleResult(resultFPR, node);
2351         return;
2352     }
2353     
2354     case NotCellUse:
2355     case NumberUse: {
2356         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2357
2358         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2359         if (isInt32Speculation(possibleTypes)) {
2360             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2361             FPRTemporary result(this);
2362             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2363             doubleResult(result.fpr(), node);
2364             return;
2365         }
2366
2367         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2368         FPRTemporary result(this);
2369
2370 #if USE(JSVALUE64)
2371         GPRTemporary temp(this);
2372
2373         GPRReg op1GPR = op1.gpr();
2374         GPRReg tempGPR = temp.gpr();
2375         FPRReg resultFPR = result.fpr();
2376         JITCompiler::JumpList done;
2377
2378         JITCompiler::Jump isInteger = m_jit.branch64(
2379             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2380
2381         if (node->child1().useKind() == NotCellUse) {
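                 // ToNumber for the non-cell, non-number cases handled below: null becomes +0,
                 // undefined becomes NaN, and booleans become 0 or 1; anything else is a cell and
                 // fails the type check.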
2382             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2383             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2384
2385             static const double zero = 0;
2386             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2387
2388             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2389             done.append(isNull);
2390
2391             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2392                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2393
2394             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2395             static const double one = 1;
2396             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2397             done.append(m_jit.jump());
2398             done.append(isFalse);
2399
2400             isUndefined.link(&m_jit);
2401             static const double NaN = PNaN;
2402             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2403             done.append(m_jit.jump());
2404
2405             isNumber.link(&m_jit);
2406         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2407             typeCheck(
2408                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2409                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2410         }
2411
2412         unboxDouble(op1GPR, tempGPR, resultFPR);
2413         done.append(m_jit.jump());
2414     
2415         isInteger.link(&m_jit);
2416         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2417         done.link(&m_jit);
2418 #else // USE(JSVALUE64) -> this is the 32_64 case
2419         FPRTemporary temp(this);
2420     
2421         GPRReg op1TagGPR = op1.tagGPR();
2422         GPRReg op1PayloadGPR = op1.payloadGPR();
2423         FPRReg tempFPR = temp.fpr();
2424         FPRReg resultFPR = result.fpr();
2425         JITCompiler::JumpList done;
2426     
2427         JITCompiler::Jump isInteger = m_jit.branch32(
2428             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2429
2430         if (node->child1().useKind() == NotCellUse) {
2431             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2432             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2433
2434             static const double zero = 0;
2435             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2436
2437             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2438             done.append(isNull);
2439
2440             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2441
2442             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2443             static const double one = 1;
2444             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2445             done.append(m_jit.jump());
2446             done.append(isFalse);
2447
2448             isUndefined.link(&m_jit);
2449             static const double NaN = PNaN;
2450             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2451             done.append(m_jit.jump());
2452
2453             isNumber.link(&m_jit);
2454         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2455             typeCheck(
2456                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2457                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2458         }
2459
2460         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2461         done.append(m_jit.jump());
2462     
2463         isInteger.link(&m_jit);
2464         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2465         done.link(&m_jit);
2466 #endif // USE(JSVALUE64)
2467     
2468         doubleResult(resultFPR, node);
2469         return;
2470     }
2471         
2472 #if USE(JSVALUE64)
2473     case Int52RepUse: {
2474         SpeculateStrictInt52Operand value(this, node->child1());
2475         FPRTemporary result(this);
2476         
2477         GPRReg valueGPR = value.gpr();
2478         FPRReg resultFPR = result.fpr();
2479
2480         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2481         
2482         doubleResult(resultFPR, node);
2483         return;
2484     }
2485 #endif // USE(JSVALUE64)
2486         
2487     default:
2488         RELEASE_ASSERT_NOT_REACHED();
2489         return;
2490     }
2491 }
2492
2493 void SpeculativeJIT::compileValueRep(Node* node)
2494 {
2495     switch (node->child1().useKind()) {
2496     case DoubleRepUse: {
2497         SpeculateDoubleOperand value(this, node->child1());
2498         JSValueRegsTemporary result(this);
2499         
2500         FPRReg valueFPR = value.fpr();
2501         JSValueRegs resultRegs = result.regs();
2502         
2503         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2504         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2505         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2506         // local was purified.
2507         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2508             m_jit.purifyNaN(valueFPR);
2509
2510         boxDouble(valueFPR, resultRegs);
2511         
2512         jsValueResult(resultRegs, node);
2513         return;
2514     }
2515         
2516 #if USE(JSVALUE64)
2517     case Int52RepUse: {
2518         SpeculateStrictInt52Operand value(this, node->child1());
2519         GPRTemporary result(this);
2520         
2521         GPRReg valueGPR = value.gpr();
2522         GPRReg resultGPR = result.gpr();
2523         
2524         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2525         
2526         jsValueResult(resultGPR, node);
2527         return;
2528     }
2529 #endif // USE(JSVALUE64)
2530         
2531     default:
2532         RELEASE_ASSERT_NOT_REACHED();
2533         return;
2534     }
2535 }
2536
2537 static double clampDoubleToByte(double d)
2538 {
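         // Rounds by adding 0.5 and clamps to [0, 255]; the !(d > 0) form also sends NaN to 0.
         // The caller truncates the result when it turns it into an integer byte for a clamped
         // typed-array store.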
2539     d += 0.5;
2540     if (!(d > 0))
2541         d = 0;
2542     else if (d > 255)
2543         d = 255;
2544     return d;
2545 }
2546
2547 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2548 {
2549     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2550     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2551     jit.xorPtr(result, result);
2552     MacroAssembler::Jump clamped = jit.jump();
2553     tooBig.link(&jit);
2554     jit.move(JITCompiler::TrustedImm32(255), result);
2555     clamped.link(&jit);
2556     inBounds.link(&jit);
2557 }
2558
2559 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2560 {
2561     // Unordered compare so we pick up NaN
2562     static const double zero = 0;
2563     static const double byteMax = 255;
2564     static const double half = 0.5;
2565     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2566     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2567     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2568     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2569     
2570     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2571     // FIXME: This should probably just use a floating point round!
2572     // https://bugs.webkit.org/show_bug.cgi?id=72054
2573     jit.addDouble(source, scratch);
2574     jit.truncateDoubleToInt32(scratch, result);   
2575     MacroAssembler::Jump truncatedInt = jit.jump();
2576     
2577     tooSmall.link(&jit);
2578     jit.xorPtr(result, result);
2579     MacroAssembler::Jump zeroed = jit.jump();
2580     
2581     tooBig.link(&jit);
2582     jit.move(JITCompiler::TrustedImm32(255), result);
2583     
2584     truncatedInt.link(&jit);
2585     zeroed.link(&jit);
2586
2587 }
2588
2589 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2590 {
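         // PutByValAlias is only used when a preceding access has already established that the
         // index is in bounds, so no check is needed. If the abstract value pins down the exact
         // typed array and the index is a constant below its length, the check folds away;
         // otherwise compare the index against the view's length.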
2591     if (node->op() == PutByValAlias)
2592         return JITCompiler::Jump();
2593     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2594         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2595     if (view) {
2596         uint32_t length = view->length();
2597         Node* indexNode = m_jit.graph().child(node, 1).node();
2598         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2599             return JITCompiler::Jump();
2600         return m_jit.branch32(
2601             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2602     }
2603     return m_jit.branch32(
2604         MacroAssembler::AboveOrEqual, indexGPR,
2605         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2606 }
2607
2608 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2609 {
2610     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2611     if (!jump.isSet())
2612         return;
2613     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2614 }
2615
2616 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2617 {
2618     ASSERT(isInt(type));
2619     
2620     SpeculateCellOperand base(this, node->child1());
2621     SpeculateStrictInt32Operand property(this, node->child2());
2622     StorageOperand storage(this, node->child3());
2623
2624     GPRReg baseReg = base.gpr();
2625     GPRReg propertyReg = property.gpr();
2626     GPRReg storageReg = storage.gpr();
2627
2628     GPRTemporary result(this);
2629     GPRReg resultReg = result.gpr();
2630
2631     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2632
2633     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2634     switch (elementSize(type)) {
2635     case 1:
2636         if (isSigned(type))
2637             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2638         else
2639             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2640         break;
2641     case 2:
2642         if (isSigned(type))
2643             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2644         else
2645             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2646         break;
2647     case 4:
2648         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2649         break;
2650     default:
2651         CRASH();
2652     }
2653     if (elementSize(type) < 4 || isSigned(type)) {
2654         int32Result(resultReg, node);
2655         return;
2656     }
2657     
2658     ASSERT(elementSize(type) == 4 && !isSigned(type));
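         // A Uint32 load may not fit in an int32. Prefer an int32 result with a check that the
         // top bit is clear, then Int52 on 64-bit, and otherwise fall back to a double, adding
         // 2^32 when the top bit is set.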
2659     if (node->shouldSpeculateInt32()) {
2660         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2661         int32Result(resultReg, node);
2662         return;
2663     }
2664     
2665 #if USE(JSVALUE64)
2666     if (node->shouldSpeculateAnyInt()) {
2667         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2668         strictInt52Result(resultReg, node);
2669         return;
2670     }
2671 #endif
2672     
2673     FPRTemporary fresult(this);
2674     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2675     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2676     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2677     positive.link(&m_jit);
2678     doubleResult(fresult.fpr(), node);
2679 }
2680
2681 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2682 {
2683     ASSERT(isInt(type));
2684     
2685     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2686     GPRReg storageReg = storage.gpr();
2687     
2688     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2689     
2690     GPRTemporary value;
2691     GPRReg valueGPR = InvalidGPRReg;
2692     
2693     if (valueUse->isConstant()) {
2694         JSValue jsValue = valueUse->asJSValue();
2695         if (!jsValue.isNumber()) {
2696             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2697             noResult(node);
2698             return;
2699         }
2700         double d = jsValue.asNumber();
2701         if (isClamped(type)) {
2702             ASSERT(elementSize(type) == 1);
2703             d = clampDoubleToByte(d);
2704         }
2705         GPRTemporary scratch(this);
2706         GPRReg scratchReg = scratch.gpr();
2707         m_jit.move(Imm32(toInt32(d)), scratchReg);
2708         value.adopt(scratch);
2709         valueGPR = scratchReg;
2710     } else {
2711         switch (valueUse.useKind()) {
2712         case Int32Use: {
2713             SpeculateInt32Operand valueOp(this, valueUse);
2714             GPRTemporary scratch(this);
2715             GPRReg scratchReg = scratch.gpr();
2716             m_jit.move(valueOp.gpr(), scratchReg);
2717             if (isClamped(type)) {
2718                 ASSERT(elementSize(type) == 1);
2719                 compileClampIntegerToByte(m_jit, scratchReg);
2720             }
2721             value.adopt(scratch);
2722             valueGPR = scratchReg;
2723             break;
2724         }
2725             
2726 #if USE(JSVALUE64)
2727         case Int52RepUse: {
2728             SpeculateStrictInt52Operand valueOp(this, valueUse);
2729             GPRTemporary scratch(this);
2730             GPRReg scratchReg = scratch.gpr();
2731             m_jit.move(valueOp.gpr(), scratchReg);
2732             if (isClamped(type)) {
2733                 ASSERT(elementSize(type) == 1);
2734                 MacroAssembler::Jump inBounds = m_jit.branch64(
2735                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2736                 MacroAssembler::Jump tooBig = m_jit.branch64(
2737                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2738                 m_jit.move(TrustedImm32(0), scratchReg);
2739                 MacroAssembler::Jump clamped = m_jit.jump();
2740                 tooBig.link(&m_jit);
2741                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2742                 clamped.link(&m_jit);
2743                 inBounds.link(&m_jit);
2744             }
2745             value.adopt(scratch);
2746             valueGPR = scratchReg;
2747             break;
2748         }
2749 #endif // USE(JSVALUE64)
2750             
2751         case DoubleRepUse: {
2752             if (isClamped(type)) {
2753                 ASSERT(elementSize(type) == 1);
2754                 SpeculateDoubleOperand valueOp(this, valueUse);
2755                 GPRTemporary result(this);
2756                 FPRTemporary floatScratch(this);
2757                 FPRReg fpr = valueOp.fpr();
2758                 GPRReg gpr = result.gpr();
2759                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2760                 value.adopt(result);
2761                 valueGPR = gpr;
2762             } else {
2763                 SpeculateDoubleOperand valueOp(this, valueUse);
2764                 GPRTemporary result(this);
2765                 FPRReg fpr = valueOp.fpr();
2766                 GPRReg gpr = result.gpr();
2767                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2768                 m_jit.xorPtr(gpr, gpr);
2769                 MacroAssembler::Jump fixed = m_jit.jump();
2770                 notNaN.link(&m_jit);
2771                 
2772                 MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
2773                     fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
2774                 
2775                 addSlowPathGenerator(slowPathCall(failed, this, operationToInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
2776                 
2777                 fixed.link(&m_jit);
2778                 value.adopt(result);
2779                 valueGPR = gpr;
2780             }
2781             break;
2782         }
2783             
2784         default:
2785             RELEASE_ASSERT_NOT_REACHED();
2786             break;
2787         }
2788     }
2789     
2790     ASSERT_UNUSED(valueGPR, valueGPR != property);
2791     ASSERT(valueGPR != base);
2792     ASSERT(valueGPR != storageReg);
2793     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2794     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2795         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2796         outOfBounds = MacroAssembler::Jump();
2797     }
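    // If the array mode tolerates out-of-bounds indices, an out-of-bounds store simply skips
    // the write below and the node still produces no result, which matches the observable
    // behavior of ignoring out-of-bounds typed array stores.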
2798
2799     switch (elementSize(type)) {
2800     case 1:
2801         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2802         break;
2803     case 2:
2804         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2805         break;
2806     case 4:
2807         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2808         break;
2809     default:
2810         CRASH();
2811     }
2812     if (outOfBounds.isSet())
2813         outOfBounds.link(&m_jit);
2814     noResult(node);
2815 }
2816
2817 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2818 {
2819     ASSERT(isFloat(type));
2820     
2821     SpeculateCellOperand base(this, node->child1());
2822     SpeculateStrictInt32Operand property(this, node->child2());
2823     StorageOperand storage(this, node->child3());
2824
2825     GPRReg baseReg = base.gpr();
2826     GPRReg propertyReg = property.gpr();
2827     GPRReg storageReg = storage.gpr();
2828
2829     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2830
2831     FPRTemporary result(this);
2832     FPRReg resultReg = result.fpr();
2833     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
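    // Float32 elements are widened to double immediately after the load, since DFG double
    // values are always full 64-bit doubles; Float64 elements can be loaded as-is.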
2834     switch (elementSize(type)) {
2835     case 4:
2836         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2837         m_jit.convertFloatToDouble(resultReg, resultReg);
2838         break;
2839     case 8: {
2840         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2841         break;
2842     }
2843     default:
2844         RELEASE_ASSERT_NOT_REACHED();
2845     }
2846     
2847     doubleResult(resultReg, node);
2848 }
2849
2850 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2851 {
2852     ASSERT(isFloat(type));
2853     
2854     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2855     GPRReg storageReg = storage.gpr();
2856     
2857     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2858     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2859
2860     SpeculateDoubleOperand valueOp(this, valueUse);
2861     FPRTemporary scratch(this);
2862     FPRReg valueFPR = valueOp.fpr();
2863     FPRReg scratchFPR = scratch.fpr();
2864
2865     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2866     
2867     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2868     if (node->arrayMode().isInBounds() && outOfBounds.isSet()) {
2869         speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2870         outOfBounds = MacroAssembler::Jump();
2871     }
2872     
2873     switch (elementSize(type)) {
2874     case 4: {
2875         m_jit.moveDouble(valueFPR, scratchFPR);
2876         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2877         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2878         break;
2879     }
2880     case 8:
2881         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2882         break;
2883     default:
2884         RELEASE_ASSERT_NOT_REACHED();
2885     }
2886     if (outOfBounds.isSet())
2887         outOfBounds.link(&m_jit);
2888     noResult(node);
2889 }
2890
2891 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2892 {
2893     // Check that prototype is an object.
2894     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
2895     
2896     // Initialize scratchReg with the value being checked.
2897     m_jit.move(valueReg, scratchReg);
2898     
2899     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
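    // Roughly, this emits the OrdinaryHasInstance loop. As an illustrative JS-style sketch
    // (not the actual code path names):
    //     let o = value;
    //     while (true) {
    //         if (isProxy(o)) return defaultHasInstance(value, prototype); // out-of-line call
    //         o = structureOf(o).storedPrototype;
    //         if (o === prototype) return true;
    //         if (!isCell(o)) return false;
    //     }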
2900     MacroAssembler::Label loop(&m_jit);
2901     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
2902         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
2903     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
2904     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
2905     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
2906 #if USE(JSVALUE64)
2907     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
2908 #else
2909     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
2910 #endif
2911     
2912     // No match - result is false.
2913 #if USE(JSVALUE64)
2914     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
2915 #else
2916     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
2917 #endif
2918     MacroAssembler::JumpList doneJumps; 
2919     doneJumps.append(m_jit.jump());
2920
2921     performDefaultHasInstance.link(&m_jit);
2922     silentSpillAllRegisters(scratchReg);
2923     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
2924     silentFillAllRegisters(scratchReg);
2925     m_jit.exceptionCheck();
2926 #if USE(JSVALUE64)
2927     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
2928 #endif
2929     doneJumps.append(m_jit.jump());
2930     
2931     isInstance.link(&m_jit);
2932 #if USE(JSVALUE64)
2933     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
2934 #else
2935     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
2936 #endif
2937     
2938     doneJumps.link(&m_jit);
2939 }
2940
2941 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
2942 {
2943     SpeculateCellOperand base(this, node->child1());
2944
2945     GPRReg baseGPR = base.gpr();
2946
2947     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
2948
2949     noResult(node);
2950 }
2951
2952 void SpeculativeJIT::compileInstanceOf(Node* node)
2953 {
2954     if (node->child1().useKind() == UntypedUse) {
2955         // It might not be a cell. Speculate less aggressively.
2956         // Or: it might only be used once (i.e. by us), so we get zero benefit
2957         // from speculating any more aggressively than we absolutely need to.
2958         
2959         JSValueOperand value(this, node->child1());
2960         SpeculateCellOperand prototype(this, node->child2());
2961         GPRTemporary scratch(this);
2962         GPRTemporary scratch2(this);
2963         
2964         GPRReg prototypeReg = prototype.gpr();
2965         GPRReg scratchReg = scratch.gpr();
2966         GPRReg scratch2Reg = scratch2.gpr();
2967         
2968         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
2969         GPRReg valueReg = value.jsValueRegs().payloadGPR();
2970         moveFalseTo(scratchReg);
2971
2972         MacroAssembler::Jump done = m_jit.jump();
2973         
2974         isCell.link(&m_jit);
2975         
2976         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2977         
2978         done.link(&m_jit);
2979
2980         blessedBooleanResult(scratchReg, node);
2981         return;
2982     }
2983     
2984     SpeculateCellOperand value(this, node->child1());
2985     SpeculateCellOperand prototype(this, node->child2());
2986     
2987     GPRTemporary scratch(this);
2988     GPRTemporary scratch2(this);
2989     
2990     GPRReg valueReg = value.gpr();
2991     GPRReg prototypeReg = prototype.gpr();
2992     GPRReg scratchReg = scratch.gpr();
2993     GPRReg scratch2Reg = scratch2.gpr();
2994     
2995     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
2996
2997     blessedBooleanResult(scratchReg, node);
2998 }
2999
3000 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3001 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3002 {
3003     Edge& leftChild = node->child1();
3004     Edge& rightChild = node->child2();
3005
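    // If either operand is statically known not to be a number, the snippet fast path could
    // never succeed, so skip it and go straight to the generic slow-path operation.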
3006     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3007         JSValueOperand left(this, leftChild);
3008         JSValueOperand right(this, rightChild);
3009         JSValueRegs leftRegs = left.jsValueRegs();
3010         JSValueRegs rightRegs = right.jsValueRegs();
3011 #if USE(JSVALUE64)
3012         GPRTemporary result(this);
3013         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3014 #else
3015         GPRTemporary resultTag(this);
3016         GPRTemporary resultPayload(this);
3017         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3018 #endif
3019         flushRegisters();
3020         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3021         m_jit.exceptionCheck();
3022
3023         jsValueResult(resultRegs, node);
3024         return;
3025     }
3026
3027     Optional<JSValueOperand> left;
3028     Optional<JSValueOperand> right;
3029
3030     JSValueRegs leftRegs;
3031     JSValueRegs rightRegs;
3032
3033 #if USE(JSVALUE64)
3034     GPRTemporary result(this);
3035     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3036     GPRTemporary scratch(this);
3037     GPRReg scratchGPR = scratch.gpr();
3038 #else
3039     GPRTemporary resultTag(this);
3040     GPRTemporary resultPayload(this);
3041     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3042     GPRReg scratchGPR = resultTag.gpr();
3043 #endif
3044
3045     SnippetOperand leftOperand;
3046     SnippetOperand rightOperand;
3047
3048     // The snippet generator does not support both operands being constant. If the left
3049     // operand is already const, we'll ignore the right operand's constness.
3050     if (leftChild->isInt32Constant())
3051         leftOperand.setConstInt32(leftChild->asInt32());
3052     else if (rightChild->isInt32Constant())
3053         rightOperand.setConstInt32(rightChild->asInt32());
3054
3055     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3056
3057     if (!leftOperand.isConst()) {
3058         left = JSValueOperand(this, leftChild);
3059         leftRegs = left->jsValueRegs();
3060     }
3061     if (!rightOperand.isConst()) {
3062         right = JSValueOperand(this, rightChild);
3063         rightRegs = right->jsValueRegs();
3064     }
3065
3066     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3067     gen.generateFastPath(m_jit);
3068
3069     ASSERT(gen.didEmitFastPath());
3070     gen.endJumpList().append(m_jit.jump());
3071
3072     gen.slowPathJumpList().link(&m_jit);
3073     silentSpillAllRegisters(resultRegs);
3074
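    // A constant operand was never loaded into registers for the fast path; before making the
    // slow call, materialize it into the result registers, which are free at this point.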
3075     if (leftOperand.isConst()) {
3076         leftRegs = resultRegs;
3077         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3078     } else if (rightOperand.isConst()) {
3079         rightRegs = resultRegs;
3080         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3081     }
3082
3083     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3084
3085     silentFillAllRegisters(resultRegs);
3086     m_jit.exceptionCheck();
3087
3088     gen.endJumpList().link(&m_jit);
3089     jsValueResult(resultRegs, node);
3090 }
3091
3092 void SpeculativeJIT::compileBitwiseOp(Node* node)
3093 {
3094     NodeType op = node->op();
3095     Edge& leftChild = node->child1();
3096     Edge& rightChild = node->child2();
3097
3098     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3099         switch (op) {
3100         case BitAnd:
3101             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3102             return;
3103         case BitOr:
3104             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3105             return;
3106         case BitXor:
3107             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3108             return;
3109         default:
3110             RELEASE_ASSERT_NOT_REACHED();
3111         }
3112     }
3113
3114     if (leftChild->isInt32Constant()) {
3115         SpeculateInt32Operand op2(this, rightChild);
3116         GPRTemporary result(this, Reuse, op2);
3117
3118         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3119
3120         int32Result(result.gpr(), node);
3121
3122     } else if (rightChild->isInt32Constant()) {
3123         SpeculateInt32Operand op1(this, leftChild);
3124         GPRTemporary result(this, Reuse, op1);
3125
3126         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3127
3128         int32Result(result.gpr(), node);
3129
3130     } else {
3131         SpeculateInt32Operand op1(this, leftChild);
3132         SpeculateInt32Operand op2(this, rightChild);
3133         GPRTemporary result(this, Reuse, op1, op2);
3134         
3135         GPRReg reg1 = op1.gpr();
3136         GPRReg reg2 = op2.gpr();
3137         bitOp(op, reg1, reg2, result.gpr());
3138         
3139         int32Result(result.gpr(), node);
3140     }
3141 }
3142
3143 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3144 {
3145     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3146         ? operationValueBitRShift : operationValueBitURShift;
3147     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3148         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3149
3150     Edge& leftChild = node->child1();
3151     Edge& rightChild = node->child2();
3152
3153     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3154         JSValueOperand left(this, leftChild);
3155         JSValueOperand right(this, rightChild);
3156         JSValueRegs leftRegs = left.jsValueRegs();
3157         JSValueRegs rightRegs = right.jsValueRegs();
3158 #if USE(JSVALUE64)
3159         GPRTemporary result(this);
3160         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3161 #else
3162         GPRTemporary resultTag(this);
3163         GPRTemporary resultPayload(this);
3164         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3165 #endif
3166         flushRegisters();
3167         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3168         m_jit.exceptionCheck();
3169
3170         jsValueResult(resultRegs, node);
3171         return;
3172     }
3173
3174     Optional<JSValueOperand> left;
3175     Optional<JSValueOperand> right;
3176
3177     JSValueRegs leftRegs;
3178     JSValueRegs rightRegs;
3179
3180     FPRTemporary leftNumber(this);
3181     FPRReg leftFPR = leftNumber.fpr();
3182
3183 #if USE(JSVALUE64)
3184     GPRTemporary result(this);
3185     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3186     GPRTemporary scratch(this);
3187     GPRReg scratchGPR = scratch.gpr();
3188     FPRReg scratchFPR = InvalidFPRReg;
3189 #else
3190     GPRTemporary resultTag(this);
3191     GPRTemporary resultPayload(this);
3192     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3193     GPRReg scratchGPR = resultTag.gpr();
3194     FPRTemporary fprScratch(this);
3195     FPRReg scratchFPR = fprScratch.fpr();
3196 #endif
3197
3198     SnippetOperand leftOperand;
3199     SnippetOperand rightOperand;
3200
3201     // The snippet generator does not support both operands being constant. If the left
3202     // operand is already const, we'll ignore the right operand's constness.
3203     if (leftChild->isInt32Constant())
3204         leftOperand.setConstInt32(leftChild->asInt32());
3205     else if (rightChild->isInt32Constant())
3206         rightOperand.setConstInt32(rightChild->asInt32());
3207
3208     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3209
3210     if (!leftOperand.isConst()) {
3211         left = JSValueOperand(this, leftChild);
3212         leftRegs = left->jsValueRegs();
3213     }
3214     if (!rightOperand.isConst()) {
3215         right = JSValueOperand(this, rightChild);
3216         rightRegs = right->jsValueRegs();
3217     }
3218
3219     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3220         leftFPR, scratchGPR, scratchFPR, shiftType);
3221     gen.generateFastPath(m_jit);
3222
3223     ASSERT(gen.didEmitFastPath());
3224     gen.endJumpList().append(m_jit.jump());
3225
3226     gen.slowPathJumpList().link(&m_jit);
3227     silentSpillAllRegisters(resultRegs);
3228
3229     if (leftOperand.isConst()) {
3230         leftRegs = resultRegs;
3231         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3232     } else if (rightOperand.isConst()) {
3233         rightRegs = resultRegs;
3234         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3235     }
3236
3237     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3238
3239     silentFillAllRegisters(resultRegs);
3240     m_jit.exceptionCheck();
3241
3242     gen.endJumpList().link(&m_jit);
3243     jsValueResult(resultRegs, node);
3244     return;
3245 }
3246
3247 void SpeculativeJIT::compileShiftOp(Node* node)
3248 {
3249     NodeType op = node->op();
3250     Edge& leftChild = node->child1();
3251     Edge& rightChild = node->child2();
3252
3253     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3254         switch (op) {
3255         case BitLShift:
3256             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3257             return;
3258         case BitRShift:
3259         case BitURShift:
3260             emitUntypedRightShiftBitOp(node);
3261             return;
3262         default:
3263             RELEASE_ASSERT_NOT_REACHED();
3264         }
3265     }
3266
3267     if (rightChild->isInt32Constant()) {
3268         SpeculateInt32Operand op1(this, leftChild);
3269         GPRTemporary result(this, Reuse, op1);
3270
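        // Per JS semantics, only the low five bits of the shift count are used
        // (the count is taken modulo 32), hence the & 0x1f on the constant.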
3271         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3272
3273         int32Result(result.gpr(), node);
3274     } else {
3275         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3276         SpeculateInt32Operand op1(this, leftChild);
3277         SpeculateInt32Operand op2(this, rightChild);
3278         GPRTemporary result(this, Reuse, op1);
3279
3280         GPRReg reg1 = op1.gpr();
3281         GPRReg reg2 = op2.gpr();
3282         shiftOp(op, reg1, reg2, result.gpr());
3283
3284         int32Result(result.gpr(), node);
3285     }
3286 }
3287
3288 void SpeculativeJIT::compileValueAdd(Node* node)
3289 {
3290     Edge& leftChild = node->child1();
3291     Edge& rightChild = node->child2();
3292
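    // When either operand is known not to be a number, the add cannot take the numeric fast
    // path, so call the generic operation directly; the NotNumber variant can skip the
    // number + number fast case entirely.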
3293     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3294         JSValueOperand left(this, leftChild);
3295         JSValueOperand right(this, rightChild);
3296         JSValueRegs leftRegs = left.jsValueRegs();
3297         JSValueRegs rightRegs = right.jsValueRegs();
3298 #if USE(JSVALUE64)
3299         GPRTemporary result(this);
3300         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3301 #else
3302         GPRTemporary resultTag(this);
3303         GPRTemporary resultPayload(this);
3304         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3305 #endif
3306         flushRegisters();
3307         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3308         m_jit.exceptionCheck();
3309     
3310         jsValueResult(resultRegs, node);
3311         return;
3312     }
3313
3314     Optional<JSValueOperand> left;
3315     Optional<JSValueOperand> right;
3316
3317     JSValueRegs leftRegs;
3318     JSValueRegs rightRegs;
3319
3320     FPRTemporary leftNumber(this);
3321     FPRTemporary rightNumber(this);
3322     FPRReg leftFPR = leftNumber.fpr();
3323     FPRReg rightFPR = rightNumber.fpr();
3324
3325 #if USE(JSVALUE64)
3326     GPRTemporary result(this);
3327     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3328     GPRTemporary scratch(this);
3329     GPRReg scratchGPR = scratch.gpr();
3330     FPRReg scratchFPR = InvalidFPRReg;
3331 #else
3332     GPRTemporary resultTag(this);
3333     GPRTemporary resultPayload(this);
3334     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3335     GPRReg scratchGPR = resultTag.gpr();
3336     FPRTemporary fprScratch(this);
3337     FPRReg scratchFPR = fprScratch.fpr();
3338 #endif
3339
3340     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3341     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3342
3343     // The snippet generator does not support both operands being constant. If the left
3344     // operand is already const, we'll ignore the right operand's constness.
3345     if (leftChild->isInt32Constant())
3346         leftOperand.setConstInt32(leftChild->asInt32());
3347     else if (rightChild->isInt32Constant())
3348         rightOperand.setConstInt32(rightChild->asInt32());
3349
3350     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3351
3352     if (!leftOperand.isConst()) {
3353         left = JSValueOperand(this, leftChild);
3354         leftRegs = left->jsValueRegs();
3355     }
3356     if (!rightOperand.isConst()) {
3357         right = JSValueOperand(this, rightChild);
3358         rightRegs = right->jsValueRegs();
3359     }
3360
3361     JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3362         leftFPR, rightFPR, scratchGPR, scratchFPR);
3363     gen.generateFastPath(m_jit);
3364
3365     ASSERT(gen.didEmitFastPath());
3366     gen.endJumpList().append(m_jit.jump());
3367
3368     gen.slowPathJumpList().link(&m_jit);
3369
3370     silentSpillAllRegisters(resultRegs);
3371
3372     if (leftOperand.isConst()) {
3373         leftRegs = resultRegs;
3374         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3375     } else if (rightOperand.isConst()) {
3376         rightRegs = resultRegs;
3377         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3378     }
3379
3380     callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
3381
3382     silentFillAllRegisters(resultRegs);
3383     m_jit.exceptionCheck();
3384
3385     gen.endJumpList().link(&m_jit);
3386     jsValueResult(resultRegs, node);
3387     return;
3388 }
3389
3390 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3391 {
3392     // We could do something smarter here, but this case is currently super rare and,
3393     // unless Symbol.hasInstance becomes popular, it will likely remain that way.
3394
3395     JSValueOperand value(this, node->child1());
3396     SpeculateCellOperand constructor(this, node->child2());
3397     JSValueOperand hasInstanceValue(this, node->child3());
3398     GPRTemporary result(this);
3399
3400     JSValueRegs valueRegs = value.jsValueRegs();
3401     GPRReg constructorGPR = constructor.gpr();
3402     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3403     GPRReg resultGPR = result.gpr();
3404
3405     MacroAssembler::Jump slowCase = m_jit.jump();
3406
3407     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3408
3409     unblessedBooleanResult(resultGPR, node);
3410 }
3411
3412 void SpeculativeJIT::compileIsJSArray(Node* node)
3413 {
3414     JSValueOperand value(this, node->child1());
3415     GPRFlushedCallResult result(this);
3416
3417     JSValueRegs valueRegs = value.jsValueRegs();
3418     GPRReg resultGPR = result.gpr();
3419
3420     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3421
3422     m_jit.compare8(JITCompiler::Equal,
3423         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3424         TrustedImm32(ArrayType),
3425         resultGPR);
3426     blessBoolean(resultGPR);
3427     JITCompiler::Jump done = m_jit.jump();
3428
3429     isNotCell.link(&m_jit);
3430     moveFalseTo(resultGPR);
3431
3432     done.link(&m_jit);
3433     blessedBooleanResult(resultGPR, node);
3434 }
3435
3436 void SpeculativeJIT::compileIsRegExpObject(Node* node)
3437 {
3438     JSValueOperand value(this, node->child1());
3439     GPRFlushedCallResult result(this);
3440
3441     JSValueRegs valueRegs = value.jsValueRegs();
3442     GPRReg resultGPR = result.gpr();
3443
3444     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3445
3446     m_jit.compare8(JITCompiler::Equal,
3447         JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3448         TrustedImm32(RegExpObjectType),
3449         resultGPR);
3450     blessBoolean(resultGPR);
3451     JITCompiler::Jump done = m_jit.jump();
3452
3453     isNotCell.link(&m_jit);
3454     moveFalseTo(resultGPR);
3455
3456     done.link(&m_jit);
3457     blessedBooleanResult(resultGPR, node);
3458 }
3459
3460 void SpeculativeJIT::compileIsTypedArrayView(Node* node)
3461 {
3462     JSValueOperand value(this, node->child1());
3463 #if USE(JSVALUE64)
3464     GPRTemporary result(this, Reuse, value);
3465 #else
3466     GPRTemporary result(this, Reuse, value, PayloadWord);
3467 #endif
3468
3469     JSValueRegs valueRegs = value.jsValueRegs();
3470     GPRReg resultGPR = result.gpr();
3471
3472     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3473
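    // Unsigned range check over the contiguous run of typed array JSTypes: after subtracting
    // Int8ArrayType, any view type lands in [0, Float64ArrayType - Int8ArrayType] while every
    // other type wraps around to a large unsigned value and fails BelowOrEqual.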
3474     m_jit.load8(JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()), resultGPR);
3475     m_jit.sub32(TrustedImm32(Int8ArrayType), resultGPR);
3476     m_jit.compare32(JITCompiler::BelowOrEqual,
3477         resultGPR,
3478         TrustedImm32(Float64ArrayType - Int8ArrayType),
3479         resultGPR);
3480     blessBoolean(resultGPR);
3481     JITCompiler::Jump done = m_jit.jump();
3482
3483     isNotCell.link(&m_jit);
3484     moveFalseTo(resultGPR);
3485
3486     done.link(&m_jit);
3487     blessedBooleanResult(resultGPR, node);
3488 }
3489
3490 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3491 {
3492     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3493     JSValueOperand value(this, node->child1());
3494 #if USE(JSVALUE64)
3495     GPRTemporary result(this, Reuse, value);
3496 #else
3497     GPRTemporary result(this, Reuse, value, PayloadWord);
3498 #endif
3499
3500     JSValueRegs valueRegs = value.jsValueRegs();
3501     GPRReg resultGPR = result.gpr();
3502
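    // Object(value): if the value is already an object, it is returned as-is on the fast path;
    // anything else (non-cells and non-object cells) is handled by operationObjectConstructor.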
3503     MacroAssembler::JumpList slowCases;
3504     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3505     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3506     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3507
3508     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3509     cellResult(resultGPR, node);
3510 }
3511
3512 void SpeculativeJIT::compileArithAdd(Node* node)
3513 {
3514     switch (node->binaryUseKind()) {
3515     case Int32Use: {
3516         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3517
3518         if (node->child2()->isInt32Constant()) {
3519             SpeculateInt32Operand op1(this, node->child1());
3520             GPRTemporary result(this, Reuse, op1);
3521
3522             GPRReg gpr1 = op1.gpr();
3523             int32_t imm2 = node->child2()->asInt32();
3524             GPRReg gprResult = result.gpr();
3525
3526             if (!shouldCheckOverflow(node->arithMode())) {
3527                 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3528                 int32Result(gprResult, node);
3529                 return;
3530             }
3531
3532             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
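            // If the add wrote over op1 (gpr1 == gprResult), an OSR exit taken on overflow must
            // first undo the addition; the SpeculationRecovery below records exactly that.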
3533             if (gpr1 == gprResult) {
3534                 speculationCheck(Overflow, JSValueRegs(), 0, check,
3535                     SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
3536             } else
3537                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3538
3539             int32Result(gprResult, node);
3540             return;
3541         }
3542                 
3543         SpeculateInt32Operand op1(this, node->child1());
3544         SpeculateInt32Operand op2(this, node->child2());
3545         GPRTemporary result(this, Reuse, op1, op2);
3546
3547         GPRReg gpr1 = op1.gpr();
3548         GPRReg gpr2 = op2.gpr();
3549         GPRReg gprResult = result.gpr();
3550
3551         if (!shouldCheckOverflow(node->arithMode()))
3552             m_jit.add32(gpr1, gpr2, gprResult);
3553         else {
3554             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3555                 
3556             if (gpr1 == gprResult)
3557                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3558             else if (gpr2 == gprResult)
3559                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3560             else
3561                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3562         }
3563
3564         int32Result(gprResult, node);
3565         return;
3566     }
3567         
3568 #if USE(JSVALUE64)
3569     case Int52RepUse: {
3570         ASSERT(shouldCheckOverflow(node->arithMode()));
3571         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3572
3573         // Will we need an overflow check? If we can prove that neither input can be
3574         // Int52 then the overflow check will not be necessary.
3575         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3576             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3577             SpeculateWhicheverInt52Operand op1(this, node->child1());
3578             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3579             GPRTemporary result(this, Reuse, op1);
3580             m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
3581             int52Result(result.gpr(), node, op1.format());
3582             return;
3583         }
3584         
3585         SpeculateInt52Operand op1(this, node->child1());
3586         SpeculateInt52Operand op2(this, node->child2());
3587         GPRTemporary result(this);
3588         m_jit.move(op1.gpr(), result.gpr());
3589         speculationCheck(
3590             Int52Overflow, JSValueRegs(), 0,
3591             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3592         int52Result(result.gpr(), node);
3593         return;
3594     }
3595 #endif // USE(JSVALUE64)
3596     
3597     case DoubleRepUse: {
3598         SpeculateDoubleOperand op1(this, node->child1());
3599         SpeculateDoubleOperand op2(this, node->child2());
3600         FPRTemporary result(this, op1, op2);
3601
3602         FPRReg reg1 = op1.fpr();
3603         FPRReg reg2 = op2.fpr();
3604         m_jit.addDouble(reg1, reg2, result.fpr());
3605
3606         doubleResult(result.fpr(), node);
3607         return;
3608     }
3609         
3610     default:
3611         RELEASE_ASSERT_NOT_REACHED();
3612         break;
3613     }
3614 }
3615
3616 void SpeculativeJIT::compileMakeRope(Node* node)
3617 {
3618     ASSERT(node->child1().useKind() == KnownStringUse);
3619     ASSERT(node->child2().useKind() == KnownStringUse);
3620     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3621     
3622     SpeculateCellOperand op1(this, node->child1());
3623     SpeculateCellOperand op2(this, node->child2());
3624     SpeculateCellOperand op3(this, node->child3());
3625     GPRTemporary result(this);
3626     GPRTemporary allocator(this);
3627     GPRTemporary scratch(this);
3628     
3629     GPRReg opGPRs[3];
3630     unsigned numOpGPRs;
3631     opGPRs[0] = op1.gpr();
3632     opGPRs[1] = op2.gpr();
3633     if (node->child3()) {
3634         opGPRs[2] = op3.gpr();
3635         numOpGPRs = 3;
3636     } else {
3637         opGPRs[2] = InvalidGPRReg;
3638         numOpGPRs = 2;
3639     }
3640     GPRReg resultGPR = result.gpr();
3641     GPRReg allocatorGPR = allocator.gpr();
3642     GPRReg scratchGPR = scratch.gpr();
3643     
3644     JITCompiler::JumpList slowPath;
3645     MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
3646     m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
3647     emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
3648         
3649     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3650     for (unsigned i = 0; i < numOpGPRs; ++i)
3651         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3652     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3653         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
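    // The rope's flags start as the first fiber's flags and are ANDed with each remaining
    // fiber's flags, so Is8Bit survives only if every fiber is 8-bit. Likewise the length
    // starts as the first fiber's length and accumulates the rest, with a speculation
    // failure on 32-bit overflow.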
3654     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3655     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3656     if (!ASSERT_DISABLED) {
3657         JITCompiler::Jump ok = m_jit.branch32(
3658             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3659         m_jit.abortWithReason(DFGNegativeStringLength);
3660         ok.link(&m_jit);
3661     }
3662     for (unsigned i = 1; i < numOpGPRs; ++i) {
3663         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3664         speculationCheck(
3665             Uncountable, JSValueSource(), nullptr,
3666             m_jit.branchAdd32(
3667                 JITCompiler::Overflow,
3668                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
3669     }
3670     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3671     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3672     if (!ASSERT_DISABLED) {
3673         JITCompiler::Jump ok = m_jit.branch32(
3674             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3675         m_jit.abortWithReason(DFGNegativeStringLength);
3676         ok.link(&m_jit);
3677     }
3678     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3679     
3680     switch (numOpGPRs) {
3681     case 2:
3682         addSlowPathGenerator(slowPathCall(
3683             slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
3684         break;
3685     case 3:
3686         addSlowPathGenerator(slowPathCall(
3687             slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
3688         break;
3689     default:
3690         RELEASE_ASSERT_NOT_REACHED();
3691         break;
3692     }
3693         
3694     cellResult(resultGPR, node);
3695 }
3696
3697 void SpeculativeJIT::compileArithClz32(Node* node)
3698 {
3699     ASSERT_WITH_MESSAGE(node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use, "The Fixup phase should have enforced an Int32 operand.");
3700     SpeculateInt32Operand value(this, node->child1());
3701     GPRTemporary result(this, Reuse, value);
3702     GPRReg valueReg = value.gpr();
3703     GPRReg resultReg = result.gpr();
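    // countLeadingZeros32 gives Math.clz32 semantics; in particular, a zero input yields 32.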
3704     m_jit.countLeadingZeros32(valueReg, resultReg);
3705     int32Result(resultReg, node);
3706 }
3707
3708 void SpeculativeJIT::compileArithSub(Node* node)
3709 {
3710     switch (node->binaryUseKind()) {
3711     case Int32Use: {
3712         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3713         
3714         if (node->child2()->isInt32Constant()) {
3715             SpeculateInt32Operand op1(this, node->child1());
3716             int32_t imm2 = node->child2()->asInt32();
3717             GPRTemporary result(this);
3718
3719             if (!shouldCheckOverflow(node->arithMode())) {
3720                 m_jit.move(op1.gpr(), result.gpr());
3721                 m_jit.sub32(Imm32(imm2), result.gpr());
3722             } else {
3723                 GPRTemporary scratch(this);
3724                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
3725             }
3726
3727             int32Result(result.gpr(), node);
3728             return;
3729         }
3730             
3731         if (node->child1()->isInt32Constant()) {
3732             int32_t imm1 = node->child1()->asInt32();
3733             SpeculateInt32Operand op2(this, node->child2());
3734             GPRTemporary result(this);
3735                 
3736             m_jit.move(Imm32(imm1), result.gpr());
3737             if (!shouldCheckOverflow(node->arithMode()))
3738                 m_jit.sub32(op2.gpr(), result.gpr());
3739             else
3740                 speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3741                 
3742             int32Result(result.gpr(), node);
3743             return;
3744         }
3745             
3746         SpeculateInt32Operand op1(this, node->child1());
3747         SpeculateInt32Operand op2(this, node->child2());
3748         GPRTemporary result(this);
3749
3750         if (!shouldCheckOverflow(node->arithMode())) {
3751             m_jit.move(op1.gpr(), result.gpr());
3752             m_jit.sub32(op2.gpr(), result.gpr());
3753         } else
3754             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
3755
3756         int32Result(result.gpr(), node);
3757         return;
3758     }
3759         
3760 #if USE(JSVALUE64)
3761     case Int52RepUse: {
3762         ASSERT(shouldCheckOverflow(node->arithMode()));
3763         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3764
3765         // Will we need an overflow check? If we can prove that neither input can be
3766         // Int52 then the overflow check will not be necessary.
3767         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3768             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3769             SpeculateWhicheverInt52Operand op1(this, node->child1());
3770             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3771             GPRTemporary result(this, Reuse, op1);
3772             m_jit.move(op1.gpr(), result.gpr());
3773             m_jit.sub64(op2.gpr(), result.gpr());
3774             int52Result(result.gpr(), node, op1.format());
3775             return;
3776         }
3777         
3778         SpeculateInt52Operand op1(this, node->child1());
3779         SpeculateInt52Operand op2(this, node->child2());
3780         GPRTemporary result(this);
3781         m_jit.move(op1.gpr(), result.gpr());
3782         speculationCheck(
3783             Int52Overflow, JSValueRegs(), 0,
3784             m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3785         int52Result(result.gpr(), node);
3786         return;
3787     }
3788 #endif // USE(JSVALUE64)
3789
3790     case DoubleRepUse: {
3791         SpeculateDoubleOperand op1(this, node->child1());
3792         SpeculateDoubleOperand op2(this, node->child2());
3793         FPRTemporary result(this, op1);
3794
3795         FPRReg reg1 = op1.fpr();
3796         FPRReg reg2 = op2.fpr();
3797         m_jit.subDouble(reg1, reg2, result.fpr());
3798
3799         doubleResult(result.fpr(), node);
3800         return;
3801     }
3802
3803     case UntypedUse: {
3804         Edge& leftChild = node->child1();
3805         Edge& rightChild = node->child2();
3806
3807         JSValueOperand left(this, leftChild);
3808         JSValueOperand right(this, rightChild);
3809
3810         JSValueRegs leftRegs = left.jsValueRegs();
3811         JSValueRegs rightRegs = right.jsValueRegs();
3812
3813         FPRTemporary leftNumber(this);
3814         FPRTemporary rightNumber(this);
3815         FPRReg leftFPR = leftNumber.fpr();
3816         FPRReg rightFPR = rightNumber.fpr();
3817
3818 #if USE(JSVALUE64)
3819         GPRTemporary result(this);
3820         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3821         GPRTemporary scratch(this);
3822         GPRReg scratchGPR = scratch.gpr();
3823         FPRReg scratchFPR = InvalidFPRReg;
3824 #else
3825         GPRTemporary resultTag(this);
3826         GPRTemporary resultPayload(this);
3827         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3828         GPRReg scratchGPR = resultTag.gpr();
3829         FPRTemporary fprScratch(this);
3830         FPRReg scratchFPR = fprScratch.fpr();
3831 #endif
3832
3833         SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3834         SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3835
3836         JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3837             leftFPR, rightFPR, scratchGPR, scratchFPR);
3838         gen.generateFastPath(m_jit);
3839
3840         ASSERT(gen.didEmitFastPath());
3841         gen.endJumpList().append(m_jit.jump());
3842
3843         gen.slowPathJumpList().link(&m_jit);
3844         silentSpillAllRegisters(resultRegs);
3845         callOperation(operationValueSub, resultRegs, leftRegs, rightRegs);
3846         silentFillAllRegisters(resultRegs);
3847         m_jit.exceptionCheck();
3848
3849         gen.endJumpList().link(&m_jit);
3850         jsValueResult(resultRegs, node);
3851         return;
3852     }
3853
3854     default:
3855         RELEASE_ASSERT_NOT_REACHED();
3856         return;
3857     }
3858 }
3859
3860 void SpeculativeJIT::compileArithNegate(Node* node)
3861 {
3862     switch (node->child1().useKind()) {
3863     case Int32Use: {
3864         SpeculateInt32Operand op1(this, node->child1());
3865         GPRTemporary result(this);
3866
3867         m_jit.move(op1.gpr(), result.gpr());
3868
3869         // Note: there is no arith mode in which the result is not used as a number
3870         // and yet someone still cares about negative zero.
3871         
3872         if (!shouldCheckOverflow(node->arithMode()))
3873             m_jit.neg32(result.gpr());
3874         else if (!shouldCheckNegativeZero(node->arithMode()))
3875             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
3876         else {
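            // Masking with 0x7fffffff drops the sign bit, so the Zero test fires only for 0 and
            // INT32_MIN: negating 0 would produce -0 (not representable as an int32 result) and
            // negating INT32_MIN overflows, so both must take a speculation failure.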
3877             speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
3878             m_jit.neg32(result.gpr());
3879         }
3880
3881         int32Result(result.gpr(), node);
3882         return;
3883     }
3884
3885 #if USE(JSVALUE64)
3886     case Int52RepUse: {
3887         ASSERT(shouldCheckOverflow(node->arithMode()));
3888         
3889