We should be able to eliminate rest parameter allocations
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGCapabilities.h"
37 #include "DFGDOMJITPatchpointParams.h"
38 #include "DFGMayExit.h"
39 #include "DFGOSRExitFuzz.h"
40 #include "DFGSaneStringGetByValSlowPathGenerator.h"
41 #include "DFGSlowPathGenerator.h"
42 #include "DOMJITPatchpoint.h"
43 #include "DirectArguments.h"
44 #include "JITAddGenerator.h"
45 #include "JITBitAndGenerator.h"
46 #include "JITBitOrGenerator.h"
47 #include "JITBitXorGenerator.h"
48 #include "JITDivGenerator.h"
49 #include "JITLeftShiftGenerator.h"
50 #include "JITMulGenerator.h"
51 #include "JITRightShiftGenerator.h"
52 #include "JITSubGenerator.h"
53 #include "JSCInlines.h"
54 #include "JSEnvironmentRecord.h"
55 #include "JSGeneratorFunction.h"
56 #include "JSLexicalEnvironment.h"
57 #include "LinkBuffer.h"
58 #include "RegExpConstructor.h"
59 #include "ScopedArguments.h"
60 #include "ScratchRegisterAllocator.h"
61 #include "WriteBarrierBuffer.h"
62 #include <wtf/Box.h>
63 #include <wtf/MathExtras.h>
64
65 namespace JSC { namespace DFG {
66
67 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
68     : m_compileOkay(true)
69     , m_jit(jit)
70     , m_currentNode(0)
71     , m_lastGeneratedNode(LastNodeType)
72     , m_indexInBlock(0)
73     , m_generationInfo(m_jit.graph().frameRegisterCount())
74     , m_state(m_jit.graph())
75     , m_interpreter(m_jit.graph(), m_state)
76     , m_stream(&jit.jitCode()->variableEventStream)
77     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
78 {
79 }
80
81 SpeculativeJIT::~SpeculativeJIT()
82 {
83 }
84
85 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
86 {
87     IndexingType indexingType = structure->indexingType();
88     bool hasIndexingHeader = hasIndexedProperties(indexingType);
89
90     unsigned inlineCapacity = structure->inlineCapacity();
91     unsigned outOfLineCapacity = structure->outOfLineCapacity();
92     
93     GPRTemporary scratch(this);
94     GPRTemporary scratch2(this);
95     GPRReg scratchGPR = scratch.gpr();
96     GPRReg scratch2GPR = scratch2.gpr();
97
98     ASSERT(vectorLength >= numElements);
99     vectorLength = Butterfly::optimalContiguousVectorLength(structure, vectorLength);
100     
101     JITCompiler::JumpList slowCases;
102
103     size_t size = 0;
104     if (hasIndexingHeader)
105         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
106     size += outOfLineCapacity * sizeof(JSValue);
107
108     m_jit.move(TrustedImmPtr(0), storageGPR);
109     
110     if (size) {
111         if (MarkedAllocator* allocator = m_jit.vm()->heap.allocatorForAuxiliaryData(size)) {
112             m_jit.move(TrustedImmPtr(allocator), scratchGPR);
113             m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);
114             
115             m_jit.addPtr(
116                 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
117                 storageGPR);
118             
119             if (hasIndexingHeader)
120                 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
121         } else
122             slowCases.append(m_jit.jump());
123     }
124
125     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
126     MarkedAllocator* allocatorPtr = m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
127     if (allocatorPtr) {
128         m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
129         emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
130     } else
131         slowCases.append(m_jit.jump());
132
133     // I want a slow path that also loads out the storage pointer, and that's
134     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
135     // of work for a very small piece of functionality. :-/
136     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
137         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
138         structure, vectorLength));
139
140     if (numElements < vectorLength) {
141 #if USE(JSVALUE64)
142         if (hasDouble(structure->indexingType()))
143             m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
144         else
145             m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
146         for (unsigned i = numElements; i < vectorLength; ++i)
147             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
148 #else
149         EncodedValueDescriptor value;
150         if (hasDouble(structure->indexingType()))
151             value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
152         else
153             value.asInt64 = JSValue::encode(JSValue());
154         for (unsigned i = numElements; i < vectorLength; ++i) {
155             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
156             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
157         }
158 #endif
159     }
160     
161     if (hasIndexingHeader)
162         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
163 }
164
165 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
166 {
167     if (inlineCallFrame && !inlineCallFrame->isVarargs())
168         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
169     else {
170         VirtualRegister argumentCountRegister;
171         if (!inlineCallFrame)
172             argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
173         else
174             argumentCountRegister = inlineCallFrame->argumentCountRegister;
175         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
176         if (!includeThis)
177             m_jit.sub32(TrustedImm32(1), lengthGPR);
178     }
179 }
180
181 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
182 {
183     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
184 }
185
186 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
187 {
188     if (origin.inlineCallFrame) {
189         if (origin.inlineCallFrame->isClosureCall) {
190             m_jit.loadPtr(
191                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
192                 calleeGPR);
193         } else {
194             m_jit.move(
195                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
196                 calleeGPR);
197         }
198     } else
199         m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
200 }
201
202 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
203 {
204     m_jit.addPtr(
205         TrustedImm32(
206             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
207         GPRInfo::callFrameRegister, startGPR);
208 }
209
210 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
211 {
212     if (!Options::useOSRExitFuzz()
213         || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
214         || !doOSRExitFuzzing())
215         return MacroAssembler::Jump();
216     
217     MacroAssembler::Jump result;
218     
219     m_jit.pushToSave(GPRInfo::regT0);
220     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
221     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
222     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
223     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
224     unsigned at = Options::fireOSRExitFuzzAt();
225     if (at || atOrAfter) {
226         unsigned threshold;
227         MacroAssembler::RelationalCondition condition;
228         if (atOrAfter) {
229             threshold = atOrAfter;
230             condition = MacroAssembler::Below;
231         } else {
232             threshold = at;
233             condition = MacroAssembler::NotEqual;
234         }
235         MacroAssembler::Jump ok = m_jit.branch32(
236             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
237         m_jit.popToRestore(GPRInfo::regT0);
238         result = m_jit.jump();
239         ok.link(&m_jit);
240     }
241     m_jit.popToRestore(GPRInfo::regT0);
242     
243     return result;
244 }
245
246 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
247 {
248     if (!m_compileOkay)
249         return;
250     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
251     if (fuzzJump.isSet()) {
252         JITCompiler::JumpList jumpsToFail;
253         jumpsToFail.append(fuzzJump);
254         jumpsToFail.append(jumpToFail);
255         m_jit.appendExitInfo(jumpsToFail);
256     } else
257         m_jit.appendExitInfo(jumpToFail);
258     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
259 }
260
261 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
262 {
263     if (!m_compileOkay)
264         return;
265     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
266     if (fuzzJump.isSet()) {
267         JITCompiler::JumpList myJumpsToFail;
268         myJumpsToFail.append(jumpsToFail);
269         myJumpsToFail.append(fuzzJump);
270         m_jit.appendExitInfo(myJumpsToFail);
271     } else
272         m_jit.appendExitInfo(jumpsToFail);
273     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
274 }
275
276 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
277 {
278     if (!m_compileOkay)
279         return OSRExitJumpPlaceholder();
280     unsigned index = m_jit.jitCode()->osrExit.size();
281     m_jit.appendExitInfo();
282     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
283     return OSRExitJumpPlaceholder(index);
284 }
285
286 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
287 {
288     return speculationCheck(kind, jsValueSource, nodeUse.node());
289 }
290
291 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
292 {
293     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
294 }
295
296 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
297 {
298     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
299 }
300
301 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
302 {
303     if (!m_compileOkay)
304         return;
305     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
306     m_jit.appendExitInfo(jumpToFail);
307     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
308 }
309
310 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
311 {
312     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
313 }
314
315 void SpeculativeJIT::emitInvalidationPoint(Node* node)
316 {
317     if (!m_compileOkay)
318         return;
319     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
320     m_jit.jitCode()->appendOSRExit(OSRExit(
321         UncountableInvalidation, JSValueSource(),
322         m_jit.graph().methodOfGettingAValueProfileFor(node),
323         this, m_stream->size()));
324     info.m_replacementSource = m_jit.watchpointLabel();
325     ASSERT(info.m_replacementSource.isSet());
326     noResult(node);
327 }
328
329 void SpeculativeJIT::unreachable(Node* node)
330 {
331     m_compileOkay = false;
332     m_jit.abortWithReason(DFGUnreachableNode, node->op());
333 }
334
335 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
336 {
337     if (!m_compileOkay)
338         return;
339     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
340     m_compileOkay = false;
341     if (verboseCompilationEnabled())
342         dataLog("Bailing compilation.\n");
343 }
344
345 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
346 {
347     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
348 }
349
350 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
351 {
352     ASSERT(needsTypeCheck(edge, typesPassedThrough));
353     m_interpreter.filter(edge, typesPassedThrough);
354     speculationCheck(exitKind, source, edge.node(), jumpToFail);
355 }
356
357 RegisterSet SpeculativeJIT::usedRegisters()
358 {
359     RegisterSet result;
360     
361     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
362         GPRReg gpr = GPRInfo::toRegister(i);
363         if (m_gprs.isInUse(gpr))
364             result.set(gpr);
365     }
366     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
367         FPRReg fpr = FPRInfo::toRegister(i);
368         if (m_fprs.isInUse(fpr))
369             result.set(fpr);
370     }
371     
372     result.merge(RegisterSet::stubUnavailableRegisters());
373     
374     return result;
375 }
376
377 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
378 {
379     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
380 }
381
382 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
383 {
384     m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
385 }
386
387 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
388 {
389     for (auto& slowPathGenerator : m_slowPathGenerators) {
390         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
391         slowPathGenerator->generate(this);
392     }
393     for (auto& slowPathLambda : m_slowPathLambdas) {
394         Node* currentNode = slowPathLambda.currentNode;
395         m_currentNode = currentNode;
396         m_outOfLineStreamIndex = slowPathLambda.streamIndex;
397         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
398         slowPathLambda.generator();
399         m_outOfLineStreamIndex = Nullopt;
400     }
401 }
402
403 void SpeculativeJIT::clearGenerationInfo()
404 {
405     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
406         m_generationInfo[i] = GenerationInfo();
407     m_gprs = RegisterBank<GPRInfo>();
408     m_fprs = RegisterBank<FPRInfo>();
409 }
410
411 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
412 {
413     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
414     Node* node = info.node();
415     DataFormat registerFormat = info.registerFormat();
416     ASSERT(registerFormat != DataFormatNone);
417     ASSERT(registerFormat != DataFormatDouble);
418         
419     SilentSpillAction spillAction;
420     SilentFillAction fillAction;
421         
422     if (!info.needsSpill())
423         spillAction = DoNothingForSpill;
424     else {
425 #if USE(JSVALUE64)
426         ASSERT(info.gpr() == source);
427         if (registerFormat == DataFormatInt32)
428             spillAction = Store32Payload;
429         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
430             spillAction = StorePtr;
431         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
432             spillAction = Store64;
433         else {
434             ASSERT(registerFormat & DataFormatJS);
435             spillAction = Store64;
436         }
437 #elif USE(JSVALUE32_64)
438         if (registerFormat & DataFormatJS) {
439             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
440             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
441         } else {
442             ASSERT(info.gpr() == source);
443             spillAction = Store32Payload;
444         }
445 #endif
446     }
447         
448     if (registerFormat == DataFormatInt32) {
449         ASSERT(info.gpr() == source);
450         ASSERT(isJSInt32(info.registerFormat()));
451         if (node->hasConstant()) {
452             ASSERT(node->isInt32Constant());
453             fillAction = SetInt32Constant;
454         } else
455             fillAction = Load32Payload;
456     } else if (registerFormat == DataFormatBoolean) {
457 #if USE(JSVALUE64)
458         RELEASE_ASSERT_NOT_REACHED();
459 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
460         fillAction = DoNothingForFill;
461 #endif
462 #elif USE(JSVALUE32_64)
463         ASSERT(info.gpr() == source);
464         if (node->hasConstant()) {
465             ASSERT(node->isBooleanConstant());
466             fillAction = SetBooleanConstant;
467         } else
468             fillAction = Load32Payload;
469 #endif
470     } else if (registerFormat == DataFormatCell) {
471         ASSERT(info.gpr() == source);
472         if (node->hasConstant()) {
473             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
474             node->asCell(); // To get the assertion.
475             fillAction = SetCellConstant;
476         } else {
477 #if USE(JSVALUE64)
478             fillAction = LoadPtr;
479 #else
480             fillAction = Load32Payload;
481 #endif
482         }
483     } else if (registerFormat == DataFormatStorage) {
484         ASSERT(info.gpr() == source);
485         fillAction = LoadPtr;
486     } else if (registerFormat == DataFormatInt52) {
487         if (node->hasConstant())
488             fillAction = SetInt52Constant;
489         else if (info.spillFormat() == DataFormatInt52)
490             fillAction = Load64;
491         else if (info.spillFormat() == DataFormatStrictInt52)
492             fillAction = Load64ShiftInt52Left;
493         else if (info.spillFormat() == DataFormatNone)
494             fillAction = Load64;
495         else {
496             RELEASE_ASSERT_NOT_REACHED();
497 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
498             fillAction = Load64; // Make GCC happy.
499 #endif
500         }
501     } else if (registerFormat == DataFormatStrictInt52) {
502         if (node->hasConstant())
503             fillAction = SetStrictInt52Constant;
504         else if (info.spillFormat() == DataFormatInt52)
505             fillAction = Load64ShiftInt52Right;
506         else if (info.spillFormat() == DataFormatStrictInt52)
507             fillAction = Load64;
508         else if (info.spillFormat() == DataFormatNone)
509             fillAction = Load64;
510         else {
511             RELEASE_ASSERT_NOT_REACHED();
512 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
513             fillAction = Load64; // Make GCC happy.
514 #endif
515         }
516     } else {
517         ASSERT(registerFormat & DataFormatJS);
518 #if USE(JSVALUE64)
519         ASSERT(info.gpr() == source);
520         if (node->hasConstant()) {
521             if (node->isCellConstant())
522                 fillAction = SetTrustedJSConstant;
523             else
524                 fillAction = SetJSConstant;
525         } else if (info.spillFormat() == DataFormatInt32) {
526             ASSERT(registerFormat == DataFormatJSInt32);
527             fillAction = Load32PayloadBoxInt;
528         } else
529             fillAction = Load64;
530 #else
531         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
532         if (node->hasConstant())
533             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
534         else if (info.payloadGPR() == source)
535             fillAction = Load32Payload;
536         else { // Fill the Tag
537             switch (info.spillFormat()) {
538             case DataFormatInt32:
539                 ASSERT(registerFormat == DataFormatJSInt32);
540                 fillAction = SetInt32Tag;
541                 break;
542             case DataFormatCell:
543                 ASSERT(registerFormat == DataFormatJSCell);
544                 fillAction = SetCellTag;
545                 break;
546             case DataFormatBoolean:
547                 ASSERT(registerFormat == DataFormatJSBoolean);
548                 fillAction = SetBooleanTag;
549                 break;
550             default:
551                 fillAction = Load32Tag;
552                 break;
553             }
554         }
555 #endif
556     }
557         
558     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
559 }
560     
561 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
562 {
563     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
564     Node* node = info.node();
565     ASSERT(info.registerFormat() == DataFormatDouble);
566
567     SilentSpillAction spillAction;
568     SilentFillAction fillAction;
569         
570     if (!info.needsSpill())
571         spillAction = DoNothingForSpill;
572     else {
573         ASSERT(!node->hasConstant());
574         ASSERT(info.spillFormat() == DataFormatNone);
575         ASSERT(info.fpr() == source);
576         spillAction = StoreDouble;
577     }
578         
579 #if USE(JSVALUE64)
580     if (node->hasConstant()) {
581         node->asNumber(); // To get the assertion.
582         fillAction = SetDoubleConstant;
583     } else {
584         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
585         fillAction = LoadDouble;
586     }
587 #elif USE(JSVALUE32_64)
588     ASSERT(info.registerFormat() == DataFormatDouble);
589     if (node->hasConstant()) {
590         node->asNumber(); // To get the assertion.
591         fillAction = SetDoubleConstant;
592     } else
593         fillAction = LoadDouble;
594 #endif
595
596     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
597 }
598     
599 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
600 {
601     switch (plan.spillAction()) {
602     case DoNothingForSpill:
603         break;
604     case Store32Tag:
605         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
606         break;
607     case Store32Payload:
608         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
609         break;
610     case StorePtr:
611         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
612         break;
613 #if USE(JSVALUE64)
614     case Store64:
615         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
616         break;
617 #endif
618     case StoreDouble:
619         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
620         break;
621     default:
622         RELEASE_ASSERT_NOT_REACHED();
623     }
624 }
625     
626 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
627 {
628 #if USE(JSVALUE32_64)
629     UNUSED_PARAM(canTrample);
630 #endif
631     switch (plan.fillAction()) {
632     case DoNothingForFill:
633         break;
634     case SetInt32Constant:
635         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
636         break;
637 #if USE(JSVALUE64)
638     case SetInt52Constant:
639         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
640         break;
641     case SetStrictInt52Constant:
642         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
643         break;
644 #endif // USE(JSVALUE64)
645     case SetBooleanConstant:
646         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
647         break;
648     case SetCellConstant:
649         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
650         break;
651 #if USE(JSVALUE64)
652     case SetTrustedJSConstant:
653         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
654         break;
655     case SetJSConstant:
656         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
657         break;
658     case SetDoubleConstant:
659         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
660         m_jit.move64ToDouble(canTrample, plan.fpr());
661         break;
662     case Load32PayloadBoxInt:
663         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
664         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
665         break;
666     case Load32PayloadConvertToInt52:
667         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
668         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
669         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
670         break;
671     case Load32PayloadSignExtend:
672         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
673         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
674         break;
675 #else
676     case SetJSConstantTag:
677         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
678         break;
679     case SetJSConstantPayload:
680         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
681         break;
682     case SetInt32Tag:
683         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
684         break;
685     case SetCellTag:
686         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
687         break;
688     case SetBooleanTag:
689         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
690         break;
691     case SetDoubleConstant:
692         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
693         break;
694 #endif
695     case Load32Tag:
696         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
697         break;
698     case Load32Payload:
699         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
700         break;
701     case LoadPtr:
702         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
703         break;
704 #if USE(JSVALUE64)
705     case Load64:
706         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
707         break;
708     case Load64ShiftInt52Right:
709         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
710         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
711         break;
712     case Load64ShiftInt52Left:
713         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
714         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
715         break;
716 #endif
717     case LoadDouble:
718         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
719         break;
720     default:
721         RELEASE_ASSERT_NOT_REACHED();
722     }
723 }
724     
725 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
726 {
727     switch (arrayMode.arrayClass()) {
728     case Array::OriginalArray: {
729         CRASH();
730 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
731         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
732         return result;
733 #endif
734     }
735         
736     case Array::Array:
737         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
738         return m_jit.branch32(
739             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
740         
741     case Array::NonArray:
742     case Array::OriginalNonArray:
743         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
744         return m_jit.branch32(
745             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
746         
747     case Array::PossiblyArray:
748         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
749         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
750     }
751     
752     RELEASE_ASSERT_NOT_REACHED();
753     return JITCompiler::Jump();
754 }
755
756 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
757 {
758     JITCompiler::JumpList result;
759     
760     switch (arrayMode.type()) {
761     case Array::Int32:
762         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
763
764     case Array::Double:
765         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
766
767     case Array::Contiguous:
768         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
769
770     case Array::Undecided:
771         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
772
773     case Array::ArrayStorage:
774     case Array::SlowPutArrayStorage: {
775         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
776         
777         if (arrayMode.isJSArray()) {
778             if (arrayMode.isSlowPut()) {
779                 result.append(
780                     m_jit.branchTest32(
781                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
782                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
783                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
784                 result.append(
785                     m_jit.branch32(
786                         MacroAssembler::Above, tempGPR,
787                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
788                 break;
789             }
790             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
791             result.append(
792                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
793             break;
794         }
795         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
796         if (arrayMode.isSlowPut()) {
797             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
798             result.append(
799                 m_jit.branch32(
800                     MacroAssembler::Above, tempGPR,
801                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
802             break;
803         }
804         result.append(
805             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
806         break;
807     }
808     default:
809         CRASH();
810         break;
811     }
812     
813     return result;
814 }
815
816 void SpeculativeJIT::checkArray(Node* node)
817 {
818     ASSERT(node->arrayMode().isSpecific());
819     ASSERT(!node->arrayMode().doesConversion());
820     
821     SpeculateCellOperand base(this, node->child1());
822     GPRReg baseReg = base.gpr();
823     
824     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
825         noResult(m_currentNode);
826         return;
827     }
828     
829     const ClassInfo* expectedClassInfo = 0;
830     
831     switch (node->arrayMode().type()) {
832     case Array::AnyTypedArray:
833     case Array::String:
834         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
835         break;
836     case Array::Int32:
837     case Array::Double:
838     case Array::Contiguous:
839     case Array::Undecided:
840     case Array::ArrayStorage:
841     case Array::SlowPutArrayStorage: {
842         GPRTemporary temp(this);
843         GPRReg tempGPR = temp.gpr();
844         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
845         speculationCheck(
846             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
847             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
848         
849         noResult(m_currentNode);
850         return;
851     }
852     case Array::DirectArguments:
853         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
854         noResult(m_currentNode);
855         return;
856     case Array::ScopedArguments:
857         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
858         noResult(m_currentNode);
859         return;
860     default:
861         speculateCellTypeWithoutTypeFiltering(
862             node->child1(), baseReg,
863             typeForTypedArrayType(node->arrayMode().typedArrayType()));
864         noResult(m_currentNode);
865         return;
866     }
867     
868     RELEASE_ASSERT(expectedClassInfo);
869     
870     GPRTemporary temp(this);
871     GPRTemporary temp2(this);
872     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
873     speculationCheck(
874         BadType, JSValueSource::unboxedCell(baseReg), node,
875         m_jit.branchPtr(
876             MacroAssembler::NotEqual,
877             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
878             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
879     
880     noResult(m_currentNode);
881 }
882
883 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
884 {
885     ASSERT(node->arrayMode().doesConversion());
886     
887     GPRTemporary temp(this);
888     GPRTemporary structure;
889     GPRReg tempGPR = temp.gpr();
890     GPRReg structureGPR = InvalidGPRReg;
891     
892     if (node->op() != ArrayifyToStructure) {
893         GPRTemporary realStructure(this);
894         structure.adopt(realStructure);
895         structureGPR = structure.gpr();
896     }
897         
898     // We can skip all that comes next if we already have array storage.
899     MacroAssembler::JumpList slowPath;
900     
901     if (node->op() == ArrayifyToStructure) {
902         slowPath.append(m_jit.branchWeakStructure(
903             JITCompiler::NotEqual,
904             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
905             node->structure()));
906     } else {
907         m_jit.load8(
908             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
909         
910         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
911     }
912     
913     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
914         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
915     
916     noResult(m_currentNode);
917 }
918
919 void SpeculativeJIT::arrayify(Node* node)
920 {
921     ASSERT(node->arrayMode().isSpecific());
922     
923     SpeculateCellOperand base(this, node->child1());
924     
925     if (!node->child2()) {
926         arrayify(node, base.gpr(), InvalidGPRReg);
927         return;
928     }
929     
930     SpeculateInt32Operand property(this, node->child2());
931     
932     arrayify(node, base.gpr(), property.gpr());
933 }
934
935 GPRReg SpeculativeJIT::fillStorage(Edge edge)
936 {
937     VirtualRegister virtualRegister = edge->virtualRegister();
938     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
939     
940     switch (info.registerFormat()) {
941     case DataFormatNone: {
942         if (info.spillFormat() == DataFormatStorage) {
943             GPRReg gpr = allocate();
944             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
945             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
946             info.fillStorage(*m_stream, gpr);
947             return gpr;
948         }
949         
950         // Must be a cell; fill it as a cell and then return the pointer.
951         return fillSpeculateCell(edge);
952     }
953         
954     case DataFormatStorage: {
955         GPRReg gpr = info.gpr();
956         m_gprs.lock(gpr);
957         return gpr;
958     }
959         
960     default:
961         return fillSpeculateCell(edge);
962     }
963 }
964
965 void SpeculativeJIT::useChildren(Node* node)
966 {
967     if (node->flags() & NodeHasVarArgs) {
968         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
969             if (!!m_jit.graph().m_varArgChildren[childIdx])
970                 use(m_jit.graph().m_varArgChildren[childIdx]);
971         }
972     } else {
973         Edge child1 = node->child1();
974         if (!child1) {
975             ASSERT(!node->child2() && !node->child3());
976             return;
977         }
978         use(child1);
979         
980         Edge child2 = node->child2();
981         if (!child2) {
982             ASSERT(!node->child3());
983             return;
984         }
985         use(child2);
986         
987         Edge child3 = node->child3();
988         if (!child3)
989             return;
990         use(child3);
991     }
992 }
993
994 void SpeculativeJIT::compileTryGetById(Node* node)
995 {
996     switch (node->child1().useKind()) {
997     case CellUse: {
998         SpeculateCellOperand base(this, node->child1());
999         JSValueRegsTemporary result(this, Reuse, base);
1000
1001         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
1002         JSValueRegs resultRegs = result.regs();
1003
1004         base.use();
1005
1006         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);
1007
1008         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1009         break;
1010     }
1011
1012     case UntypedUse: {
1013         JSValueOperand base(this, node->child1());
1014         JSValueRegsTemporary result(this, Reuse, base);
1015
1016         JSValueRegs baseRegs = base.jsValueRegs();
1017         JSValueRegs resultRegs = result.regs();
1018
1019         base.use();
1020
1021         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1022
1023         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);
1024
1025         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1026         break;
1027     }
1028
1029     default:
1030         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1031         break;
1032     } 
1033 }
1034
1035 void SpeculativeJIT::compilePureGetById(Node* node)
1036 {
1037     ASSERT(node->op() == PureGetById);
1038
1039     switch (node->child1().useKind()) {
1040     case CellUse: {
1041         SpeculateCellOperand base(this, node->child1());
1042         JSValueRegsTemporary result(this, Reuse, base);
1043
1044         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
1045         JSValueRegs resultRegs = result.regs();
1046
1047         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::PureGet);
1048
1049         jsValueResult(resultRegs, node);
1050         break;
1051     }
1052     case UntypedUse: {
1053         JSValueOperand base(this, node->child1());
1054         JSValueRegsTemporary result(this, Reuse, base);
1055
1056         JSValueRegs baseRegs = base.jsValueRegs();
1057         JSValueRegs resultRegs = result.regs();
1058     
1059         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1060     
1061         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::PureGet);
1062     
1063         jsValueResult(resultRegs, node);
1064         break;
1065     }
1066     default:
1067         RELEASE_ASSERT_NOT_REACHED();
1068     }
1069 }
1070
1071 void SpeculativeJIT::compileIn(Node* node)
1072 {
1073     SpeculateCellOperand base(this, node->child2());
1074     GPRReg baseGPR = base.gpr();
1075     
1076     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
1077         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1078             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1079             
1080             GPRTemporary result(this);
1081             GPRReg resultGPR = result.gpr();
1082
1083             use(node->child1());
1084             
1085             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1086             MacroAssembler::Label done = m_jit.label();
1087             
1088             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1089             // we can cast it to const AtomicStringImpl* safely.
1090             auto slowPath = slowPathCall(
1091                 jump.m_jump, this, operationInOptimize,
1092                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1093                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1094             
1095             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1096             stubInfo->codeOrigin = node->origin.semantic;
1097             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1098             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1099 #if USE(JSVALUE32_64)
1100             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1101             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1102 #endif
1103             stubInfo->patch.usedRegisters = usedRegisters();
1104
1105             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1106             addSlowPathGenerator(WTFMove(slowPath));
1107
1108             base.use();
1109
1110             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1111             return;
1112         }
1113     }
1114
1115     JSValueOperand key(this, node->child1());
1116     JSValueRegs regs = key.jsValueRegs();
1117         
1118     GPRFlushedCallResult result(this);
1119     GPRReg resultGPR = result.gpr();
1120         
1121     base.use();
1122     key.use();
1123         
1124     flushRegisters();
1125     callOperation(
1126         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1127         baseGPR, regs);
1128     m_jit.exceptionCheck();
1129     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1130 }
1131
1132 void SpeculativeJIT::compileDeleteById(Node* node)
1133 {
1134     JSValueOperand value(this, node->child1());
1135     GPRFlushedCallResult result(this);
1136
1137     JSValueRegs valueRegs = value.jsValueRegs();
1138     GPRReg resultGPR = result.gpr();
1139
1140     value.use();
1141
1142     flushRegisters();
1143     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1144     m_jit.exceptionCheck();
1145
1146     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1147 }
1148
1149 void SpeculativeJIT::compileDeleteByVal(Node* node)
1150 {
1151     JSValueOperand base(this, node->child1());
1152     JSValueOperand key(this, node->child2());
1153     GPRFlushedCallResult result(this);
1154
1155     JSValueRegs baseRegs = base.jsValueRegs();
1156     JSValueRegs keyRegs = key.jsValueRegs();
1157     GPRReg resultGPR = result.gpr();
1158
1159     base.use();
1160     key.use();
1161
1162     flushRegisters();
1163     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1164     m_jit.exceptionCheck();
1165
1166     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1167 }
1168
1169 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1170 {
1171     unsigned branchIndexInBlock = detectPeepHoleBranch();
1172     if (branchIndexInBlock != UINT_MAX) {
1173         Node* branchNode = m_block->at(branchIndexInBlock);
1174
1175         ASSERT(node->adjustedRefCount() == 1);
1176         
1177         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1178     
1179         m_indexInBlock = branchIndexInBlock;
1180         m_currentNode = branchNode;
1181         
1182         return true;
1183     }
1184     
1185     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1186     
1187     return false;
1188 }
1189
1190 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1191 {
1192     unsigned branchIndexInBlock = detectPeepHoleBranch();
1193     if (branchIndexInBlock != UINT_MAX) {
1194         Node* branchNode = m_block->at(branchIndexInBlock);
1195
1196         ASSERT(node->adjustedRefCount() == 1);
1197         
1198         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1199     
1200         m_indexInBlock = branchIndexInBlock;
1201         m_currentNode = branchNode;
1202         
1203         return true;
1204     }
1205     
1206     nonSpeculativeNonPeepholeStrictEq(node, invert);
1207     
1208     return false;
1209 }
1210
1211 static const char* dataFormatString(DataFormat format)
1212 {
1213     // These values correspond to the DataFormat enum.
1214     const char* strings[] = {
1215         "[  ]",
1216         "[ i]",
1217         "[ d]",
1218         "[ c]",
1219         "Err!",
1220         "Err!",
1221         "Err!",
1222         "Err!",
1223         "[J ]",
1224         "[Ji]",
1225         "[Jd]",
1226         "[Jc]",
1227         "Err!",
1228         "Err!",
1229         "Err!",
1230         "Err!",
1231     };
1232     return strings[format];
1233 }
1234
1235 void SpeculativeJIT::dump(const char* label)
1236 {
1237     if (label)
1238         dataLogF("<%s>\n", label);
1239
1240     dataLogF("  gprs:\n");
1241     m_gprs.dump();
1242     dataLogF("  fprs:\n");
1243     m_fprs.dump();
1244     dataLogF("  VirtualRegisters:\n");
1245     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1246         GenerationInfo& info = m_generationInfo[i];
1247         if (info.alive())
1248             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1249         else
1250             dataLogF("    % 3d:[__][__]", i);
1251         if (info.registerFormat() == DataFormatDouble)
1252             dataLogF(":fpr%d\n", info.fpr());
1253         else if (info.registerFormat() != DataFormatNone
1254 #if USE(JSVALUE32_64)
1255             && !(info.registerFormat() & DataFormatJS)
1256 #endif
1257             ) {
1258             ASSERT(info.gpr() != InvalidGPRReg);
1259             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1260         } else
1261             dataLogF("\n");
1262     }
1263     if (label)
1264         dataLogF("</%s>\n", label);
1265 }
1266
1267 GPRTemporary::GPRTemporary()
1268     : m_jit(0)
1269     , m_gpr(InvalidGPRReg)
1270 {
1271 }
1272
1273 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1274     : m_jit(jit)
1275     , m_gpr(InvalidGPRReg)
1276 {
1277     m_gpr = m_jit->allocate();
1278 }
1279
1280 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1281     : m_jit(jit)
1282     , m_gpr(InvalidGPRReg)
1283 {
1284     m_gpr = m_jit->allocate(specific);
1285 }
1286
1287 #if USE(JSVALUE32_64)
1288 GPRTemporary::GPRTemporary(
1289     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1290     : m_jit(jit)
1291     , m_gpr(InvalidGPRReg)
1292 {
1293     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1294         m_gpr = m_jit->reuse(op1.gpr(which));
1295     else
1296         m_gpr = m_jit->allocate();
1297 }
1298 #endif // USE(JSVALUE32_64)
1299
1300 JSValueRegsTemporary::JSValueRegsTemporary() { }
1301
1302 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1303 #if USE(JSVALUE64)
1304     : m_gpr(jit)
1305 #else
1306     : m_payloadGPR(jit)
1307     , m_tagGPR(jit)
1308 #endif
1309 {
1310 }
1311
1312 #if USE(JSVALUE64)
1313 template<typename T>
1314 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1315     : m_gpr(jit, Reuse, operand)
1316 {
1317 }
1318 #else
1319 template<typename T>
1320 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1321 {
1322     if (resultWord == PayloadWord) {
1323         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1324         m_tagGPR = GPRTemporary(jit);
1325     } else {
1326         m_payloadGPR = GPRTemporary(jit);
1327         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1328     }
1329 }
1330 #endif
1331
1332 #if USE(JSVALUE64)
1333 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1334 {
1335     m_gpr = GPRTemporary(jit, Reuse, operand);
1336 }
1337 #else
1338 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1339 {
1340     if (jit->canReuse(operand.node())) {
1341         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1342         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1343     } else {
1344         m_payloadGPR = GPRTemporary(jit);
1345         m_tagGPR = GPRTemporary(jit);
1346     }
1347 }
1348 #endif
1349
1350 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1351
1352 JSValueRegs JSValueRegsTemporary::regs()
1353 {
1354 #if USE(JSVALUE64)
1355     return JSValueRegs(m_gpr.gpr());
1356 #else
1357     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1358 #endif
1359 }
1360
1361 void GPRTemporary::adopt(GPRTemporary& other)
1362 {
1363     ASSERT(!m_jit);
1364     ASSERT(m_gpr == InvalidGPRReg);
1365     ASSERT(other.m_jit);
1366     ASSERT(other.m_gpr != InvalidGPRReg);
1367     m_jit = other.m_jit;
1368     m_gpr = other.m_gpr;
1369     other.m_jit = 0;
1370     other.m_gpr = InvalidGPRReg;
1371 }
1372
1373 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1374 {
1375     ASSERT(other.m_jit);
1376     ASSERT(other.m_fpr != InvalidFPRReg);
1377     m_jit = other.m_jit;
1378     m_fpr = other.m_fpr;
1379
1380     other.m_jit = nullptr;
1381 }
1382
1383 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1384     : m_jit(jit)
1385     , m_fpr(InvalidFPRReg)
1386 {
1387     m_fpr = m_jit->fprAllocate();
1388 }
1389
1390 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1391     : m_jit(jit)
1392     , m_fpr(InvalidFPRReg)
1393 {
1394     if (m_jit->canReuse(op1.node()))
1395         m_fpr = m_jit->reuse(op1.fpr());
1396     else
1397         m_fpr = m_jit->fprAllocate();
1398 }
1399
1400 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1401     : m_jit(jit)
1402     , m_fpr(InvalidFPRReg)
1403 {
1404     if (m_jit->canReuse(op1.node()))
1405         m_fpr = m_jit->reuse(op1.fpr());
1406     else if (m_jit->canReuse(op2.node()))
1407         m_fpr = m_jit->reuse(op2.fpr());
1408     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1409         m_fpr = m_jit->reuse(op1.fpr());
1410     else
1411         m_fpr = m_jit->fprAllocate();
1412 }
1413
1414 #if USE(JSVALUE32_64)
1415 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1416     : m_jit(jit)
1417     , m_fpr(InvalidFPRReg)
1418 {
1419     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1420         m_fpr = m_jit->reuse(op1.fpr());
1421     else
1422         m_fpr = m_jit->fprAllocate();
1423 }
1424 #endif
1425
1426 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1427 {
1428     BasicBlock* taken = branchNode->branchData()->taken.block;
1429     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1430
1431     if (taken == nextBlock()) {
1432         condition = MacroAssembler::invert(condition);
1433         std::swap(taken, notTaken);
1434     }
1435
1436     SpeculateDoubleOperand op1(this, node->child1());
1437     SpeculateDoubleOperand op2(this, node->child2());
1438     
1439     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1440     jump(notTaken);
1441 }
1442
1443 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1444 {
1445     BasicBlock* taken = branchNode->branchData()->taken.block;
1446     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1447
1448     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1449     
1450     if (taken == nextBlock()) {
1451         condition = MacroAssembler::NotEqual;
1452         BasicBlock* tmp = taken;
1453         taken = notTaken;
1454         notTaken = tmp;
1455     }
1456
1457     SpeculateCellOperand op1(this, node->child1());
1458     SpeculateCellOperand op2(this, node->child2());
1459     
1460     GPRReg op1GPR = op1.gpr();
1461     GPRReg op2GPR = op2.gpr();
1462     
1463     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1464         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1465             speculationCheck(
1466                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1467         }
1468         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1469             speculationCheck(
1470                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1471         }
1472     } else {
1473         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1474             speculationCheck(
1475                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1476                 m_jit.branchIfNotObject(op1GPR));
1477         }
1478         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1479             m_jit.branchTest8(
1480                 MacroAssembler::NonZero, 
1481                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1482                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1483
1484         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1485             speculationCheck(
1486                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1487                 m_jit.branchIfNotObject(op2GPR));
1488         }
1489         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1490             m_jit.branchTest8(
1491                 MacroAssembler::NonZero, 
1492                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1493                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1494     }
1495
1496     branchPtr(condition, op1GPR, op2GPR, taken);
1497     jump(notTaken);
1498 }
1499
1500 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1501 {
1502     BasicBlock* taken = branchNode->branchData()->taken.block;
1503     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1504
1505     // The branch instruction will branch to the taken block.
1506     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1507     if (taken == nextBlock()) {
1508         condition = JITCompiler::invert(condition);
1509         BasicBlock* tmp = taken;
1510         taken = notTaken;
1511         notTaken = tmp;
1512     }
1513
1514     if (node->child1()->isInt32Constant()) {
1515         int32_t imm = node->child1()->asInt32();
1516         SpeculateBooleanOperand op2(this, node->child2());
1517         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1518     } else if (node->child2()->isInt32Constant()) {
1519         SpeculateBooleanOperand op1(this, node->child1());
1520         int32_t imm = node->child2()->asInt32();
1521         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1522     } else {
1523         SpeculateBooleanOperand op1(this, node->child1());
1524         SpeculateBooleanOperand op2(this, node->child2());
1525         branch32(condition, op1.gpr(), op2.gpr(), taken);
1526     }
1527
1528     jump(notTaken);
1529 }
1530
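     // Inline fast path for ToLowerCase: walk an 8-bit string and bail out to operationToLowerCase
     // (starting at the current index) as soon as we see a rope, a 16-bit string, a non-ASCII
     // character, or an uppercase ASCII letter. If the loop completes, the string is already
     // lowercase and is returned unchanged.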
1531 void SpeculativeJIT::compileToLowerCase(Node* node)
1532 {
1533     ASSERT(node->op() == ToLowerCase);
1534     SpeculateCellOperand string(this, node->child1());
1535     GPRTemporary temp(this);
1536     GPRTemporary index(this);
1537     GPRTemporary charReg(this);
1538     GPRTemporary length(this);
1539
1540     GPRReg stringGPR = string.gpr();
1541     GPRReg tempGPR = temp.gpr();
1542     GPRReg indexGPR = index.gpr();
1543     GPRReg charGPR = charReg.gpr();
1544     GPRReg lengthGPR = length.gpr();
1545
1546     speculateString(node->child1(), stringGPR);
1547
1548     CCallHelpers::JumpList slowPath;
1549
1550     m_jit.move(TrustedImmPtr(0), indexGPR);
1551
1552     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1553     slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
1554
1555     slowPath.append(m_jit.branchTest32(
1556         MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1557         MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1558     m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1559     m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1560
1561     auto loopStart = m_jit.label();
1562     auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1563     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1564     slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1565     m_jit.sub32(TrustedImm32('A'), charGPR);
1566     slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1567
1568     m_jit.add32(TrustedImm32(1), indexGPR);
1569     m_jit.jump().linkTo(loopStart, &m_jit);
1570     
1571     slowPath.link(&m_jit);
1572     silentSpillAllRegisters(lengthGPR);
1573     callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1574     silentFillAllRegisters(lengthGPR);
1575     m_jit.exceptionCheck();
1576     auto done = m_jit.jump();
1577
1578     loopDone.link(&m_jit);
1579     m_jit.move(stringGPR, lengthGPR);
1580
1581     done.link(&m_jit);
1582     cellResult(lengthGPR, node);
1583 }
1584
1585 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1586 {
1587     BasicBlock* taken = branchNode->branchData()->taken.block;
1588     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1589
1590     // The branch instruction will branch to the taken block.
1591     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1592     if (taken == nextBlock()) {
1593         condition = JITCompiler::invert(condition);
1594         BasicBlock* tmp = taken;
1595         taken = notTaken;
1596         notTaken = tmp;
1597     }
1598
1599     if (node->child1()->isInt32Constant()) {
1600         int32_t imm = node->child1()->asInt32();
1601         SpeculateInt32Operand op2(this, node->child2());
1602         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1603     } else if (node->child2()->isInt32Constant()) {
1604         SpeculateInt32Operand op1(this, node->child1());
1605         int32_t imm = node->child2()->asInt32();
1606         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1607     } else {
1608         SpeculateInt32Operand op1(this, node->child1());
1609         SpeculateInt32Operand op2(this, node->child2());
1610         branch32(condition, op1.gpr(), op2.gpr(), taken);
1611     }
1612
1613     jump(notTaken);
1614 }
1615
1616 // Returns true if the compare is fused with a subsequent branch.
1617 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1618 {
1619     // Fused compare & branch.
1620     unsigned branchIndexInBlock = detectPeepHoleBranch();
1621     if (branchIndexInBlock != UINT_MAX) {
1622         Node* branchNode = m_block->at(branchIndexInBlock);
1623
1624         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1625         // so there can be no intervening nodes that also reference the compare.
1626         ASSERT(node->adjustedRefCount() == 1);
1627
1628         if (node->isBinaryUseKind(Int32Use))
1629             compilePeepHoleInt32Branch(node, branchNode, condition);
1630 #if USE(JSVALUE64)
1631         else if (node->isBinaryUseKind(Int52RepUse))
1632             compilePeepHoleInt52Branch(node, branchNode, condition);
1633 #endif // USE(JSVALUE64)
1634         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1635             // Use non-peephole comparison, for now.
1636             return false;
1637         } else if (node->isBinaryUseKind(DoubleRepUse))
1638             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1639         else if (node->op() == CompareEq) {
1640             if (node->isBinaryUseKind(BooleanUse))
1641                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1642             else if (node->isBinaryUseKind(SymbolUse))
1643                 compilePeepHoleSymbolEquality(node, branchNode);
1644             else if (node->isBinaryUseKind(ObjectUse))
1645                 compilePeepHoleObjectEquality(node, branchNode);
1646             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1647                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1648             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1649                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1650             else if (!needsTypeCheck(node->child1(), SpecOther))
1651                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1652             else if (!needsTypeCheck(node->child2(), SpecOther))
1653                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1654             else {
1655                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1656                 return true;
1657             }
1658         } else {
1659             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1660             return true;
1661         }
1662
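             // The compare's children are consumed here, and we skip ahead to the branch node so
             // that it is not code-generated a second time by the loop in compileCurrentBlock().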
1663         use(node->child1());
1664         use(node->child2());
1665         m_indexInBlock = branchIndexInBlock;
1666         m_currentNode = branchNode;
1667         return true;
1668     }
1669     return false;
1670 }
1671
1672 void SpeculativeJIT::noticeOSRBirth(Node* node)
1673 {
1674     if (!node->hasVirtualRegister())
1675         return;
1676     
1677     VirtualRegister virtualRegister = node->virtualRegister();
1678     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1679     
1680     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1681 }
1682
1683 void SpeculativeJIT::compileMovHint(Node* node)
1684 {
1685     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1686     
1687     Node* child = node->child1().node();
1688     noticeOSRBirth(child);
1689     
1690     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1691 }
1692
1693 void SpeculativeJIT::bail(AbortReason reason)
1694 {
1695     if (verboseCompilationEnabled())
1696         dataLog("Bailing compilation.\n");
1697     m_compileOkay = true;
1698     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1699     clearGenerationInfo();
1700 }
1701
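     // Generates code for the current basic block: emit the block head label, refuse to emit
     // anything for blocks the CFA proved unreachable, record the variable events needed for
     // OSR, and then walk the node list, running the abstract interpreter in lockstep with
     // code generation so that every node sees an up-to-date abstract state.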
1702 void SpeculativeJIT::compileCurrentBlock()
1703 {
1704     ASSERT(m_compileOkay);
1705     
1706     if (!m_block)
1707         return;
1708     
1709     ASSERT(m_block->isReachable);
1710     
1711     m_jit.blockHeads()[m_block->index] = m_jit.label();
1712
1713     if (!m_block->intersectionOfCFAHasVisited) {
1714         // Don't generate code for basic blocks that are unreachable according to CFA.
1715         // But to be sure that nobody has generated a jump to this block, drop in a
1716         // breakpoint here.
1717         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1718         return;
1719     }
1720
1721     m_stream->appendAndLog(VariableEvent::reset());
1722     
1723     m_jit.jitAssertHasValidCallFrame();
1724     m_jit.jitAssertTagsInPlace();
1725     m_jit.jitAssertArgumentCountSane();
1726
1727     m_state.reset();
1728     m_state.beginBasicBlock(m_block);
1729     
1730     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1731         int operand = m_block->variablesAtHead.operandForIndex(i);
1732         Node* node = m_block->variablesAtHead[i];
1733         if (!node)
1734             continue; // No need to record dead SetLocal's.
1735         
1736         VariableAccessData* variable = node->variableAccessData();
1737         DataFormat format;
1738         if (!node->refCount())
1739             continue; // No need to record dead SetLocal's.
1740         format = dataFormatFor(variable->flushFormat());
1741         m_stream->appendAndLog(
1742             VariableEvent::setLocal(
1743                 VirtualRegister(operand),
1744                 variable->machineLocal(),
1745                 format));
1746     }
1747
1748     m_origin = NodeOrigin();
1749     
1750     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1751         m_currentNode = m_block->at(m_indexInBlock);
1752         
1753         // We may have hit a contradiction that the CFA was aware of but that the JIT
1754         // didn't cause directly.
1755         if (!m_state.isValid()) {
1756             bail(DFGBailedAtTopOfBlock);
1757             return;
1758         }
1759
1760         m_interpreter.startExecuting();
1761         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1762         m_jit.setForNode(m_currentNode);
1763         m_origin = m_currentNode->origin;
1764         if (validationEnabled())
1765             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1766         m_lastGeneratedNode = m_currentNode->op();
1767         
1768         ASSERT(m_currentNode->shouldGenerate());
1769         
1770         if (verboseCompilationEnabled()) {
1771             dataLogF(
1772                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1773                 (int)m_currentNode->index(),
1774                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1775             dataLog("\n");
1776         }
1777
1778         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1779             m_jit.jitReleaseAssertNoException();
1780
1781         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1782
1783         compile(m_currentNode);
1784         
1785         if (belongsInMinifiedGraph(m_currentNode->op()))
1786             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1787         
1788 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1789         m_jit.clearRegisterAllocationOffsets();
1790 #endif
1791         
1792         if (!m_compileOkay) {
1793             bail(DFGBailedAtEndOfNode);
1794             return;
1795         }
1796         
1797         // Make sure that the abstract state is rematerialized for the next node.
1798         m_interpreter.executeEffects(m_indexInBlock);
1799     }
1800     
1801     // Perform the most basic verification that children have been used correctly.
1802     if (!ASSERT_DISABLED) {
1803         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1804             GenerationInfo& info = m_generationInfo[index];
1805             RELEASE_ASSERT(!info.alive());
1806         }
1807     }
1808 }
1809
1810 // If we are making type predictions about our arguments then
1811 // we need to check that they are correct on function entry.
1812 void SpeculativeJIT::checkArgumentTypes()
1813 {
1814     ASSERT(!m_currentNode);
1815     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1816
1817     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1818         Node* node = m_jit.graph().m_arguments[i];
1819         if (!node) {
1820             // The argument is dead. We don't do any checks for such arguments.
1821             continue;
1822         }
1823         
1824         ASSERT(node->op() == SetArgument);
1825         ASSERT(node->shouldGenerate());
1826
1827         VariableAccessData* variableAccessData = node->variableAccessData();
1828         FlushFormat format = variableAccessData->flushFormat();
1829         
1830         if (format == FlushedJSValue)
1831             continue;
1832         
1833         VirtualRegister virtualRegister = variableAccessData->local();
1834
1835         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1836         
1837 #if USE(JSVALUE64)
1838         switch (format) {
1839         case FlushedInt32: {
1840             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1841             break;
1842         }
1843         case FlushedBoolean: {
1844             GPRTemporary temp(this);
1845             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1846             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1847             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1848             break;
1849         }
1850         case FlushedCell: {
1851             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1852             break;
1853         }
1854         default:
1855             RELEASE_ASSERT_NOT_REACHED();
1856             break;
1857         }
1858 #else
1859         switch (format) {
1860         case FlushedInt32: {
1861             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1862             break;
1863         }
1864         case FlushedBoolean: {
1865             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1866             break;
1867         }
1868         case FlushedCell: {
1869             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1870             break;
1871         }
1872         default:
1873             RELEASE_ASSERT_NOT_REACHED();
1874             break;
1875         }
1876 #endif
1877     }
1878
1879     m_origin = NodeOrigin();
1880 }
1881
1882 bool SpeculativeJIT::compile()
1883 {
1884     checkArgumentTypes();
1885     
1886     ASSERT(!m_currentNode);
1887     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1888         m_jit.setForBlockIndex(blockIndex);
1889         m_block = m_jit.graph().block(blockIndex);
1890         compileCurrentBlock();
1891     }
1892     linkBranches();
1893     return true;
1894 }
1895
1896 void SpeculativeJIT::createOSREntries()
1897 {
1898     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1899         BasicBlock* block = m_jit.graph().block(blockIndex);
1900         if (!block)
1901             continue;
1902         if (!block->isOSRTarget)
1903             continue;
1904         
1905         // Currently we don't have OSR entry trampolines. We could add them
1906         // here if need be.
1907         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1908     }
1909 }
1910
1911 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1912 {
1913     unsigned osrEntryIndex = 0;
1914     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1915         BasicBlock* block = m_jit.graph().block(blockIndex);
1916         if (!block)
1917             continue;
1918         if (!block->isOSRTarget)
1919             continue;
1920         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1921     }
1922     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1923     
1924     if (verboseCompilationEnabled()) {
1925         DumpContext dumpContext;
1926         dataLog("OSR Entries:\n");
1927         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1928             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1929         if (!dumpContext.isEmpty())
1930             dumpContext.dump(WTF::dataFile());
1931     }
1932 }
1933
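     // PutByVal into a double array. The value is speculated to be a real number, never NaN
     // (presumably because NaN bit patterns are reserved for holes in double storage). In-bounds
     // stores are guarded by the public length; stores between the public length and the vector
     // length grow the public length; anything past the vector length takes the
     // beyond-array-bounds operation when the array mode allows out-of-bounds stores.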
1934 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1935 {
1936     Edge child3 = m_jit.graph().varArgChild(node, 2);
1937     Edge child4 = m_jit.graph().varArgChild(node, 3);
1938
1939     ArrayMode arrayMode = node->arrayMode();
1940     
1941     GPRReg baseReg = base.gpr();
1942     GPRReg propertyReg = property.gpr();
1943     
1944     SpeculateDoubleOperand value(this, child3);
1945
1946     FPRReg valueReg = value.fpr();
1947     
1948     DFG_TYPE_CHECK(
1949         JSValueRegs(), child3, SpecFullRealNumber,
1950         m_jit.branchDouble(
1951             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1952     
1953     if (!m_compileOkay)
1954         return;
1955     
1956     StorageOperand storage(this, child4);
1957     GPRReg storageReg = storage.gpr();
1958
1959     if (node->op() == PutByValAlias) {
1960         // Store the value to the array.
1961         GPRReg propertyReg = property.gpr();
1962         FPRReg valueReg = value.fpr();
1963         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1964         
1965         noResult(m_currentNode);
1966         return;
1967     }
1968     
1969     GPRTemporary temporary;
1970     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1971
1972     MacroAssembler::Jump slowCase;
1973     
1974     if (arrayMode.isInBounds()) {
1975         speculationCheck(
1976             OutOfBounds, JSValueRegs(), 0,
1977             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1978     } else {
1979         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1980         
1981         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1982         
1983         if (!arrayMode.isOutOfBounds())
1984             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1985         
1986         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1987         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1988         
1989         inBounds.link(&m_jit);
1990     }
1991     
1992     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1993
1994     base.use();
1995     property.use();
1996     value.use();
1997     storage.use();
1998     
1999     if (arrayMode.isOutOfBounds()) {
2000         addSlowPathGenerator(
2001             slowPathCall(
2002                 slowCase, this,
2003                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
2004                 NoResult, baseReg, propertyReg, valueReg));
2005     }
2006
2007     noResult(m_currentNode, UseChildrenCalledExplicitly);
2008 }
2009
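     // charCodeAt fast path: the index is a strict int32 that is bounds-checked against the
     // string's length, then a single code unit is loaded from the 8-bit or 16-bit backing store.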
2010 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
2011 {
2012     SpeculateCellOperand string(this, node->child1());
2013     SpeculateStrictInt32Operand index(this, node->child2());
2014     StorageOperand storage(this, node->child3());
2015
2016     GPRReg stringReg = string.gpr();
2017     GPRReg indexReg = index.gpr();
2018     GPRReg storageReg = storage.gpr();
2019     
2020     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
2021
2022     // unsigned comparison so we can filter out negative indices and indices that are too large
2023     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2024
2025     GPRTemporary scratch(this);
2026     GPRReg scratchReg = scratch.gpr();
2027
2028     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2029
2030     // Load the character into scratchReg
2031     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2032
2033     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2034     JITCompiler::Jump cont8Bit = m_jit.jump();
2035
2036     is16Bit.link(&m_jit);
2037
2038     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2039
2040     cont8Bit.link(&m_jit);
2041
2042     int32Result(scratchReg, m_currentNode);
2043 }
2044
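     // Indexed access on a string. In-bounds accesses load one code unit and map it through the
     // VM's single-character string cache, with a slow call for characters >= 0x100. For array
     // modes that allow out-of-bounds access, a sane String prototype chain (enforced lazily via
     // transition watchpoints) lets the out-of-bounds case use the cheaper
     // SaneStringGetByValSlowPathGenerator; otherwise we fall back to operationGetByValStringInt.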
2045 void SpeculativeJIT::compileGetByValOnString(Node* node)
2046 {
2047     SpeculateCellOperand base(this, node->child1());
2048     SpeculateStrictInt32Operand property(this, node->child2());
2049     StorageOperand storage(this, node->child3());
2050     GPRReg baseReg = base.gpr();
2051     GPRReg propertyReg = property.gpr();
2052     GPRReg storageReg = storage.gpr();
2053
2054     GPRTemporary scratch(this);
2055     GPRReg scratchReg = scratch.gpr();
2056 #if USE(JSVALUE32_64)
2057     GPRTemporary resultTag;
2058     GPRReg resultTagReg = InvalidGPRReg;
2059     if (node->arrayMode().isOutOfBounds()) {
2060         GPRTemporary realResultTag(this);
2061         resultTag.adopt(realResultTag);
2062         resultTagReg = resultTag.gpr();
2063     }
2064 #endif
2065
2066     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2067
2068     // unsigned comparison so we can filter out negative indices and indices that are too large
2069     JITCompiler::Jump outOfBounds = m_jit.branch32(
2070         MacroAssembler::AboveOrEqual, propertyReg,
2071         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2072     if (node->arrayMode().isInBounds())
2073         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2074
2075     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2076
2077     // Load the character into scratchReg
2078     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2079
2080     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2081     JITCompiler::Jump cont8Bit = m_jit.jump();
2082
2083     is16Bit.link(&m_jit);
2084
2085     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2086
2087     JITCompiler::Jump bigCharacter =
2088         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2089
2090     // 8-bit string values don't need the isASCII check.
2091     cont8Bit.link(&m_jit);
2092
2093     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2094     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2095     m_jit.loadPtr(scratchReg, scratchReg);
2096
2097     addSlowPathGenerator(
2098         slowPathCall(
2099             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2100
2101     if (node->arrayMode().isOutOfBounds()) {
2102 #if USE(JSVALUE32_64)
2103         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2104 #endif
2105
2106         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2107         bool prototypeChainIsSane = false;
2108         if (globalObject->stringPrototypeChainIsSane()) {
2109             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2110             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2111             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2112             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2113             // indexed properties either.
2114             // https://bugs.webkit.org/show_bug.cgi?id=144668
2115             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2116             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2117             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2118         }
2119         if (prototypeChainIsSane) {
2120             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2121             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2122             
2123 #if USE(JSVALUE64)
2124             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2125                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2126 #else
2127             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2128                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2129                 baseReg, propertyReg));
2130 #endif
2131         } else {
2132 #if USE(JSVALUE64)
2133             addSlowPathGenerator(
2134                 slowPathCall(
2135                     outOfBounds, this, operationGetByValStringInt,
2136                     scratchReg, baseReg, propertyReg));
2137 #else
2138             addSlowPathGenerator(
2139                 slowPathCall(
2140                     outOfBounds, this, operationGetByValStringInt,
2141                     JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2142 #endif
2143         }
2144         
2145 #if USE(JSVALUE64)
2146         jsValueResult(scratchReg, m_currentNode);
2147 #else
2148         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2149 #endif
2150     } else
2151         cellResult(scratchReg, m_currentNode);
2152 }
2153
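     // String.fromCharCode with a single argument. Untyped operands go straight to the generic
     // operation; int32 char codes below 0xff are served from the VM's single-character string
     // cache, with a slow path for larger codes and for cache entries that have not been
     // materialized yet.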
2154 void SpeculativeJIT::compileFromCharCode(Node* node)
2155 {
2156     Edge& child = node->child1();
2157     if (child.useKind() == UntypedUse) {
2158         JSValueOperand opr(this, child);
2159         JSValueRegs oprRegs = opr.jsValueRegs();
2160 #if USE(JSVALUE64)
2161         GPRTemporary result(this);
2162         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2163 #else
2164         GPRTemporary resultTag(this);
2165         GPRTemporary resultPayload(this);
2166         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2167 #endif
2168         flushRegisters();
2169         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2170         m_jit.exceptionCheck();
2171         
2172         jsValueResult(resultRegs, node);
2173         return;
2174     }
2175
2176     SpeculateStrictInt32Operand property(this, child);
2177     GPRReg propertyReg = property.gpr();
2178     GPRTemporary smallStrings(this);
2179     GPRTemporary scratch(this);
2180     GPRReg scratchReg = scratch.gpr();
2181     GPRReg smallStringsReg = smallStrings.gpr();
2182
2183     JITCompiler::JumpList slowCases;
2184     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2185     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2186     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2187
2188     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2189     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2190     cellResult(scratchReg, m_currentNode);
2191 }
2192
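     // Decides which ToInt32 path to use for an operand based on how it is currently represented:
     // already an int32, some flavor of JSValue that needs a dynamic check, or an unboxed cell or
     // boolean, for which speculative execution is terminated.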
2193 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2194 {
2195     VirtualRegister virtualRegister = node->virtualRegister();
2196     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2197
2198     switch (info.registerFormat()) {
2199     case DataFormatStorage:
2200         RELEASE_ASSERT_NOT_REACHED();
2201
2202     case DataFormatBoolean:
2203     case DataFormatCell:
2204         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2205         return GeneratedOperandTypeUnknown;
2206
2207     case DataFormatNone:
2208     case DataFormatJSCell:
2209     case DataFormatJS:
2210     case DataFormatJSBoolean:
2211     case DataFormatJSDouble:
2212         return GeneratedOperandJSValue;
2213
2214     case DataFormatJSInt32:
2215     case DataFormatInt32:
2216         return GeneratedOperandInteger;
2217
2218     default:
2219         RELEASE_ASSERT_NOT_REACHED();
2220         return GeneratedOperandTypeUnknown;
2221     }
2222 }
2223
2224 void SpeculativeJIT::compileValueToInt32(Node* node)
2225 {
2226     switch (node->child1().useKind()) {
2227 #if USE(JSVALUE64)
2228     case Int52RepUse: {
2229         SpeculateStrictInt52Operand op1(this, node->child1());
2230         GPRTemporary result(this, Reuse, op1);
2231         GPRReg op1GPR = op1.gpr();
2232         GPRReg resultGPR = result.gpr();
2233         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2234         int32Result(resultGPR, node, DataFormatInt32);
2235         return;
2236     }
2237 #endif // USE(JSVALUE64)
2238         
2239     case DoubleRepUse: {
2240         GPRTemporary result(this);
2241         SpeculateDoubleOperand op1(this, node->child1());
2242         FPRReg fpr = op1.fpr();
2243         GPRReg gpr = result.gpr();
2244         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2245         
2246         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2247         
2248         int32Result(gpr, node);
2249         return;
2250     }
2251     
2252     case NumberUse:
2253     case NotCellUse: {
2254         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2255         case GeneratedOperandInteger: {
2256             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2257             GPRTemporary result(this, Reuse, op1);
2258             m_jit.move(op1.gpr(), result.gpr());
2259             int32Result(result.gpr(), node, op1.format());
2260             return;
2261         }
2262         case GeneratedOperandJSValue: {
2263             GPRTemporary result(this);
2264 #if USE(JSVALUE64)
2265             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2266
2267             GPRReg gpr = op1.gpr();
2268             GPRReg resultGpr = result.gpr();
2269             FPRTemporary tempFpr(this);
2270             FPRReg fpr = tempFpr.fpr();
2271
2272             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2273             JITCompiler::JumpList converted;
2274
2275             if (node->child1().useKind() == NumberUse) {
2276                 DFG_TYPE_CHECK(
2277                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2278                     m_jit.branchTest64(
2279                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2280             } else {
2281                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2282                 
2283                 DFG_TYPE_CHECK(
2284                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2285                 
2286                 // It's not a cell: so true turns into 1 and all else turns into 0.
2287                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2288                 converted.append(m_jit.jump());
2289                 
2290                 isNumber.link(&m_jit);
2291             }
2292
2293             // If we get here, the value is a double encoded as a JSValue.
2294             unboxDouble(gpr, resultGpr, fpr);
2295
2296             silentSpillAllRegisters(resultGpr);
2297             callOperation(operationToInt32, resultGpr, fpr);
2298             silentFillAllRegisters(resultGpr);
2299
2300             converted.append(m_jit.jump());
2301
2302             isInteger.link(&m_jit);
2303             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2304
2305             converted.link(&m_jit);
2306 #else
2307             Node* childNode = node->child1().node();
2308             VirtualRegister virtualRegister = childNode->virtualRegister();
2309             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2310
2311             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2312
2313             GPRReg payloadGPR = op1.payloadGPR();
2314             GPRReg resultGpr = result.gpr();
2315         
2316             JITCompiler::JumpList converted;
2317
2318             if (info.registerFormat() == DataFormatJSInt32)
2319                 m_jit.move(payloadGPR, resultGpr);
2320             else {
2321                 GPRReg tagGPR = op1.tagGPR();
2322                 FPRTemporary tempFpr(this);
2323                 FPRReg fpr = tempFpr.fpr();
2324                 FPRTemporary scratch(this);
2325
2326                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2327
2328                 if (node->child1().useKind() == NumberUse) {
2329                     DFG_TYPE_CHECK(
2330                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2331                         m_jit.branch32(
2332                             MacroAssembler::AboveOrEqual, tagGPR,
2333                             TrustedImm32(JSValue::LowestTag)));
2334                 } else {
2335                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2336                     
2337                     DFG_TYPE_CHECK(
2338                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2339                         m_jit.branchIfCell(op1.jsValueRegs()));
2340                     
2341                     // It's not a cell: so true turns into 1 and all else turns into 0.
2342                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2343                     m_jit.move(TrustedImm32(0), resultGpr);
2344                     converted.append(m_jit.jump());
2345                     
2346                     isBoolean.link(&m_jit);
2347                     m_jit.move(payloadGPR, resultGpr);
2348                     converted.append(m_jit.jump());
2349                     
2350                     isNumber.link(&m_jit);
2351                 }
2352
2353                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2354
2355                 silentSpillAllRegisters(resultGpr);
2356                 callOperation(operationToInt32, resultGpr, fpr);
2357                 silentFillAllRegisters(resultGpr);
2358
2359                 converted.append(m_jit.jump());
2360
2361                 isInteger.link(&m_jit);
2362                 m_jit.move(payloadGPR, resultGpr);
2363
2364                 converted.link(&m_jit);
2365             }
2366 #endif
2367             int32Result(resultGpr, node);
2368             return;
2369         }
2370         case GeneratedOperandTypeUnknown:
2371             RELEASE_ASSERT(!m_compileOkay);
2372             return;
2373         }
2374         RELEASE_ASSERT_NOT_REACHED();
2375         return;
2376     }
2377     
2378     default:
2379         ASSERT(!m_compileOkay);
2380         return;
2381     }
2382 }
2383
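     // Reinterprets an int32 as an unsigned 32-bit value. If the result may overflow int32 range,
     // it is produced as an Int52 where available, or as a double with 2^32 added back for
     // negative inputs; otherwise we speculate that the value is non-negative and keep it as an
     // int32.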
2384 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2385 {
2386     if (doesOverflow(node->arithMode())) {
2387         if (enableInt52()) {
2388             SpeculateInt32Operand op1(this, node->child1());
2389             GPRTemporary result(this, Reuse, op1);
2390             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2391             strictInt52Result(result.gpr(), node);
2392             return;
2393         }
2394         SpeculateInt32Operand op1(this, node->child1());
2395         FPRTemporary result(this);
2396             
2397         GPRReg inputGPR = op1.gpr();
2398         FPRReg outputFPR = result.fpr();
2399             
2400         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2401             
2402         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2403         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2404         positive.link(&m_jit);
2405             
2406         doubleResult(outputFPR, node);
2407         return;
2408     }
2409     
2410     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2411
2412     SpeculateInt32Operand op1(this, node->child1());
2413     GPRTemporary result(this);
2414
2415     m_jit.move(op1.gpr(), result.gpr());
2416
2417     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2418
2419     int32Result(result.gpr(), node, op1.format());
2420 }
2421
2422 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2423 {
2424     SpeculateDoubleOperand op1(this, node->child1());
2425     FPRTemporary scratch(this);
2426     GPRTemporary result(this);
2427     
2428     FPRReg valueFPR = op1.fpr();
2429     FPRReg scratchFPR = scratch.fpr();
2430     GPRReg resultGPR = result.gpr();
2431
2432     JITCompiler::JumpList failureCases;
2433     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2434     m_jit.branchConvertDoubleToInt32(
2435         valueFPR, resultGPR, failureCases, scratchFPR,
2436         shouldCheckNegativeZero(node->arithMode()));
2437     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2438
2439     int32Result(resultGPR, node);
2440 }
2441
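     // Converts a value to its DoubleRep form. RealNumberUse unboxes doubles and converts int32s;
     // NotCellUse additionally maps undefined to NaN, null and false to 0, and true to 1 inline;
     // NumberUse type-checks for a number before unboxing; Int52 values are converted directly.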
2442 void SpeculativeJIT::compileDoubleRep(Node* node)
2443 {
2444     switch (node->child1().useKind()) {
2445     case RealNumberUse: {
2446         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2447         FPRTemporary result(this);
2448         
2449         JSValueRegs op1Regs = op1.jsValueRegs();
2450         FPRReg resultFPR = result.fpr();
2451         
2452 #if USE(JSVALUE64)
2453         GPRTemporary temp(this);
2454         GPRReg tempGPR = temp.gpr();
2455         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2456 #else
2457         FPRTemporary temp(this);
2458         FPRReg tempFPR = temp.fpr();
2459         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2460 #endif
2461         
2462         JITCompiler::Jump done = m_jit.branchDouble(
2463             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2464         
2465         DFG_TYPE_CHECK(
2466             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2467         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2468         
2469         done.link(&m_jit);
2470         
2471         doubleResult(resultFPR, node);
2472         return;
2473     }
2474     
2475     case NotCellUse:
2476     case NumberUse: {
2477         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2478
2479         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2480         if (isInt32Speculation(possibleTypes)) {
2481             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2482             FPRTemporary result(this);
2483             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2484             doubleResult(result.fpr(), node);
2485             return;
2486         }
2487
2488         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2489         FPRTemporary result(this);
2490
2491 #if USE(JSVALUE64)
2492         GPRTemporary temp(this);
2493
2494         GPRReg op1GPR = op1.gpr();
2495         GPRReg tempGPR = temp.gpr();
2496         FPRReg resultFPR = result.fpr();
2497         JITCompiler::JumpList done;
2498
2499         JITCompiler::Jump isInteger = m_jit.branch64(
2500             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2501
2502         if (node->child1().useKind() == NotCellUse) {
2503             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2504             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2505
2506             static const double zero = 0;
2507             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2508
2509             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2510             done.append(isNull);
2511
2512             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2513                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2514
2515             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2516             static const double one = 1;
2517             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2518             done.append(m_jit.jump());
2519             done.append(isFalse);
2520
2521             isUndefined.link(&m_jit);
2522             static const double NaN = PNaN;
2523             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2524             done.append(m_jit.jump());
2525
2526             isNumber.link(&m_jit);
2527         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2528             typeCheck(
2529                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2530                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2531         }
2532
2533         unboxDouble(op1GPR, tempGPR, resultFPR);
2534         done.append(m_jit.jump());
2535     
2536         isInteger.link(&m_jit);
2537         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2538         done.link(&m_jit);
2539 #else // USE(JSVALUE64) -> this is the 32_64 case
2540         FPRTemporary temp(this);
2541     
2542         GPRReg op1TagGPR = op1.tagGPR();
2543         GPRReg op1PayloadGPR = op1.payloadGPR();
2544         FPRReg tempFPR = temp.fpr();
2545         FPRReg resultFPR = result.fpr();
2546         JITCompiler::JumpList done;
2547     
2548         JITCompiler::Jump isInteger = m_jit.branch32(
2549             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2550
2551         if (node->child1().useKind() == NotCellUse) {
2552             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2553             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2554
2555             static const double zero = 0;
2556             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2557
2558             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2559             done.append(isNull);
2560
2561             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2562
2563             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2564             static const double one = 1;
2565             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2566             done.append(m_jit.jump());
2567             done.append(isFalse);
2568
2569             isUndefined.link(&m_jit);
2570             static const double NaN = PNaN;
2571             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2572             done.append(m_jit.jump());
2573
2574             isNumber.link(&m_jit);
2575         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2576             typeCheck(
2577                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2578                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2579         }
2580
2581         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2582         done.append(m_jit.jump());
2583     
2584         isInteger.link(&m_jit);
2585         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2586         done.link(&m_jit);
2587 #endif // USE(JSVALUE64)
2588     
2589         doubleResult(resultFPR, node);
2590         return;
2591     }
2592         
2593 #if USE(JSVALUE64)
2594     case Int52RepUse: {
2595         SpeculateStrictInt52Operand value(this, node->child1());
2596         FPRTemporary result(this);
2597         
2598         GPRReg valueGPR = value.gpr();
2599         FPRReg resultFPR = result.fpr();
2600
2601         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2602         
2603         doubleResult(resultFPR, node);
2604         return;
2605     }
2606 #endif // USE(JSVALUE64)
2607         
2608     default:
2609         RELEASE_ASSERT_NOT_REACHED();
2610         return;
2611     }
2612 }
2613
2614 void SpeculativeJIT::compileValueRep(Node* node)
2615 {
2616     switch (node->child1().useKind()) {
2617     case DoubleRepUse: {
2618         SpeculateDoubleOperand value(this, node->child1());
2619         JSValueRegsTemporary result(this);
2620         
2621         FPRReg valueFPR = value.fpr();
2622         JSValueRegs resultRegs = result.regs();
2623         
2624         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2625         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2626         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2627         // local was purified.
2628         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2629             m_jit.purifyNaN(valueFPR);
2630
2631         boxDouble(valueFPR, resultRegs);
2632         
2633         jsValueResult(resultRegs, node);
2634         return;
2635     }
2636         
2637 #if USE(JSVALUE64)
2638     case Int52RepUse: {
2639         SpeculateStrictInt52Operand value(this, node->child1());
2640         GPRTemporary result(this);
2641         
2642         GPRReg valueGPR = value.gpr();
2643         GPRReg resultGPR = result.gpr();
2644         
2645         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2646         
2647         jsValueResult(resultGPR, node);
2648         return;
2649     }
2650 #endif // USE(JSVALUE64)
2651         
2652     default:
2653         RELEASE_ASSERT_NOT_REACHED();
2654         return;
2655     }
2656 }
2657
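     // Clamps a double to the 0..255 range used for clamped byte stores; adding 0.5 up front
     // means the later truncation to int rounds to the nearest integer.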
2658 static double clampDoubleToByte(double d)
2659 {
2660     d += 0.5;
2661     if (!(d > 0))
2662         d = 0;
2663     else if (d > 255)
2664         d = 255;
2665     return d;
2666 }
2667
2668 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2669 {
2670     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2671     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2672     jit.xorPtr(result, result);
2673     MacroAssembler::Jump clamped = jit.jump();
2674     tooBig.link(&jit);
2675     jit.move(JITCompiler::TrustedImm32(255), result);
2676     clamped.link(&jit);
2677     inBounds.link(&jit);
2678 }
2679
2680 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2681 {
2682     // Unordered compare so we pick up NaN
2683     static const double zero = 0;
2684     static const double byteMax = 255;
2685     static const double half = 0.5;
2686     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2687     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2688     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2689     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2690     
2691     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2692     // FIXME: This should probably just use a floating point round!
2693     // https://bugs.webkit.org/show_bug.cgi?id=72054
2694     jit.addDouble(source, scratch);
2695     jit.truncateDoubleToInt32(scratch, result);   
2696     MacroAssembler::Jump truncatedInt = jit.jump();
2697     
2698     tooSmall.link(&jit);
2699     jit.xorPtr(result, result);
2700     MacroAssembler::Jump zeroed = jit.jump();
2701     
2702     tooBig.link(&jit);
2703     jit.move(JITCompiler::TrustedImm32(255), result);
2704     
2705     truncatedInt.link(&jit);
2706     zeroed.link(&jit);
2707
2708 }
2709
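     // Emits the bounds check for a typed array access, or returns an unset jump when the check
     // can be elided: PutByValAlias has already been checked, and a foldable view with a constant
     // in-bounds index needs no check at all. For a foldable view the length is compared as an
     // immediate instead of being reloaded from the object.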
2710 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2711 {
2712     if (node->op() == PutByValAlias)
2713         return JITCompiler::Jump();
2714     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2715         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2716     if (view) {
2717         uint32_t length = view->length();
2718         Node* indexNode = m_jit.graph().child(node, 1).node();
2719         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2720             return JITCompiler::Jump();
2721         return m_jit.branch32(
2722             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2723     }
2724     return m_jit.branch32(
2725         MacroAssembler::AboveOrEqual, indexGPR,
2726         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2727 }
2728
2729 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2730 {
2731     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2732     if (!jump.isSet())
2733         return;
2734     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2735 }
2736
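     // For accesses that may go out of bounds, route the out-of-bounds case through a check that
     // a wasteful typed array still has a backing vector; a null vector (presumably a neutered
     // buffer) becomes a speculation failure rather than an ordinary out-of-bounds access. Returns
     // the jump the in-bounds path takes around this check.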
2737 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2738 {
2739     JITCompiler::Jump done;
2740     if (outOfBounds.isSet()) {
2741         done = m_jit.jump();
2742         if (node->arrayMode().isInBounds())
2743             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2744         else {
2745             outOfBounds.link(&m_jit);
2746
2747             JITCompiler::Jump notWasteful = m_jit.branch32(
2748                 MacroAssembler::NotEqual,
2749                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2750                 TrustedImm32(WastefulTypedArray));
2751
2752             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2753                 MacroAssembler::Zero,
2754                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2755             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2756             notWasteful.link(&m_jit);
2757         }
2758     }
2759     return done;
2760 }
2761
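     // Loads an element from an integer typed array, sign- or zero-extending to 32 bits as the
     // element type requires. Uint32 loads need extra care: speculate a non-negative int32, use
     // Int52 where available, or fall back to a double with 2^32 added when the sign bit is set.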
2762 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2763 {
2764     ASSERT(isInt(type));
2765     
2766     SpeculateCellOperand base(this, node->child1());
2767     SpeculateStrictInt32Operand property(this, node->child2());
2768     StorageOperand storage(this, node->child3());
2769
2770     GPRReg baseReg = base.gpr();
2771     GPRReg propertyReg = property.gpr();
2772     GPRReg storageReg = storage.gpr();
2773
2774     GPRTemporary result(this);
2775     GPRReg resultReg = result.gpr();
2776
2777     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2778
2779     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2780     switch (elementSize(type)) {
2781     case 1:
2782         if (isSigned(type))
2783             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2784         else
2785             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2786         break;
2787     case 2:
2788         if (isSigned(type))
2789             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2790         else
2791             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2792         break;
2793     case 4:
2794         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2795         break;
2796     default:
2797         CRASH();
2798     }
2799     if (elementSize(type) < 4 || isSigned(type)) {
2800         int32Result(resultReg, node);
2801         return;
2802     }
2803     
2804     ASSERT(elementSize(type) == 4 && !isSigned(type));
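         // Only Uint32Array reaches this point. If profiling says the result fits in an int32, speculate
         // that the sign bit is clear and OSR exit when it is set.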
2805     if (node->shouldSpeculateInt32()) {
2806         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2807         int32Result(resultReg, node);
2808         return;
2809     }
2810     
2811 #if USE(JSVALUE64)
2812     if (node->shouldSpeculateAnyInt()) {
2813         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2814         strictInt52Result(resultReg, node);
2815         return;
2816     }
2817 #endif
2818     
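         // Convert the unsigned 32-bit value to a double: convert it as a signed int32, then add 2^32
         // when the sign bit was set.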
2819     FPRTemporary fresult(this);
2820     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2821     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2822     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2823     positive.link(&m_jit);
2824     doubleResult(fresult.fpr(), node);
2825 }
2826
2827 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2828 {
2829     ASSERT(isInt(type));
2830     
2831     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2832     GPRReg storageReg = storage.gpr();
2833     
2834     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2835     
2836     GPRTemporary value;
2837 #if USE(JSVALUE32_64)
2838     GPRTemporary propertyTag;
2839     GPRTemporary valueTag;
2840 #endif
2841
2842     GPRReg valueGPR = InvalidGPRReg;
2843 #if USE(JSVALUE32_64)
2844     GPRReg propertyTagGPR = InvalidGPRReg;
2845     GPRReg valueTagGPR = InvalidGPRReg;
2846 #endif
2847
2848     JITCompiler::JumpList slowPathCases;
2849
2850     if (valueUse->isConstant()) {
2851         JSValue jsValue = valueUse->asJSValue();
2852         if (!jsValue.isNumber()) {
2853             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2854             noResult(node);
2855             return;
2856         }
2857         double d = jsValue.asNumber();
2858         if (isClamped(type)) {
2859             ASSERT(elementSize(type) == 1);
2860             d = clampDoubleToByte(d);
2861         }
2862         GPRTemporary scratch(this);
2863         GPRReg scratchReg = scratch.gpr();
2864         m_jit.move(Imm32(toInt32(d)), scratchReg);
2865         value.adopt(scratch);
2866         valueGPR = scratchReg;
2867     } else {
2868         switch (valueUse.useKind()) {
2869         case Int32Use: {
2870             SpeculateInt32Operand valueOp(this, valueUse);
2871             GPRTemporary scratch(this);
2872             GPRReg scratchReg = scratch.gpr();
2873             m_jit.move(valueOp.gpr(), scratchReg);
2874             if (isClamped(type)) {
2875                 ASSERT(elementSize(type) == 1);
2876                 compileClampIntegerToByte(m_jit, scratchReg);
2877             }
2878             value.adopt(scratch);
2879             valueGPR = scratchReg;
2880             break;
2881         }
2882             
2883 #if USE(JSVALUE64)
2884         case Int52RepUse: {
2885             SpeculateStrictInt52Operand valueOp(this, valueUse);
2886             GPRTemporary scratch(this);
2887             GPRReg scratchReg = scratch.gpr();
2888             m_jit.move(valueOp.gpr(), scratchReg);
2889             if (isClamped(type)) {
2890                 ASSERT(elementSize(type) == 1);
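                     // Clamp to [0, 255]: the unsigned BelowOrEqual test accepts 0..255 directly; otherwise
                     // the signed GreaterThan test sends large positive values to 255 and negative values
                     // fall through to 0.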
2891                 MacroAssembler::Jump inBounds = m_jit.branch64(
2892                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2893                 MacroAssembler::Jump tooBig = m_jit.branch64(
2894                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2895                 m_jit.move(TrustedImm32(0), scratchReg);
2896                 MacroAssembler::Jump clamped = m_jit.jump();
2897                 tooBig.link(&m_jit);
2898                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2899                 clamped.link(&m_jit);
2900                 inBounds.link(&m_jit);
2901             }
2902             value.adopt(scratch);
2903             valueGPR = scratchReg;
2904             break;
2905         }
2906 #endif // USE(JSVALUE64)
2907             
2908         case DoubleRepUse: {
2909             if (isClamped(type)) {
2910                 ASSERT(elementSize(type) == 1);
2911                 SpeculateDoubleOperand valueOp(this, valueUse);
2912                 GPRTemporary result(this);
2913                 FPRTemporary floatScratch(this);
2914                 FPRReg fpr = valueOp.fpr();
2915                 GPRReg gpr = result.gpr();
2916                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2917                 value.adopt(result);
2918                 valueGPR = gpr;
2919             } else {
2920 #if USE(JSVALUE32_64)
2921                 GPRTemporary realPropertyTag(this);
2922                 propertyTag.adopt(realPropertyTag);
2923                 propertyTagGPR = propertyTag.gpr();
2924
2925                 GPRTemporary realValueTag(this);
2926                 valueTag.adopt(realValueTag);
2927                 valueTagGPR = valueTag.gpr();
2928 #endif
2929                 SpeculateDoubleOperand valueOp(this, valueUse);
2930                 GPRTemporary result(this);
2931                 FPRReg fpr = valueOp.fpr();
2932                 GPRReg gpr = result.gpr();
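                     // NaN stores zero. Other doubles are truncated to an int32 in place; if truncation
                     // fails, the value is boxed and the store falls back to the put-by-val slow path below.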
2933                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2934                 m_jit.xorPtr(gpr, gpr);
2935                 MacroAssembler::JumpList fixed(m_jit.jump());
2936                 notNaN.link(&m_jit);
2937
2938                 fixed.append(m_jit.branchTruncateDoubleToInt32(
2939                     fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
2940
2941 #if USE(JSVALUE64)
2942                 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
2943                 boxDouble(fpr, gpr);
2944 #else
2945                 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
2946                 boxDouble(fpr, valueTagGPR, gpr);
2947 #endif
2948                 slowPathCases.append(m_jit.jump());
2949
2950                 fixed.link(&m_jit);
2951                 value.adopt(result);
2952                 valueGPR = gpr;
2953             }
2954             break;
2955         }
2956             
2957         default:
2958             RELEASE_ASSERT_NOT_REACHED();
2959             break;
2960         }
2961     }
2962     
2963     ASSERT_UNUSED(valueGPR, valueGPR != property);
2964     ASSERT(valueGPR != base);
2965     ASSERT(valueGPR != storageReg);
2966     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2967
2968     switch (elementSize(type)) {
2969     case 1:
2970         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2971         break;
2972     case 2:
2973         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2974         break;
2975     case 4:
2976         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2977         break;
2978     default:
2979         CRASH();
2980     }
2981
2982     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2983     if (done.isSet())
2984         done.link(&m_jit);
2985
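         // The slow path cases come only from the DoubleRepUse branch above, where the double could not
         // be truncated inline; they redo the store through the generic put-by-val operations.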
2986     if (!slowPathCases.empty()) {
2987 #if USE(JSVALUE64)
2988         if (node->op() == PutByValDirect) {
2989             addSlowPathGenerator(slowPathCall(
2990                 slowPathCases, this,
2991                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
2992                 NoResult, base, property, valueGPR));
2993         } else {
2994             addSlowPathGenerator(slowPathCall(
2995                 slowPathCases, this,
2996                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
2997                 NoResult, base, property, valueGPR));
2998         }
2999 #else // not USE(JSVALUE64)
3000         if (node->op() == PutByValDirect) {
3001             addSlowPathGenerator(slowPathCall(
3002                 slowPathCases, this,
3003                 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
3004                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3005         } else {
3006             addSlowPathGenerator(slowPathCall(
3007                 slowPathCases, this,
3008                 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
3009                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3010         }
3011 #endif
3012     }
3013     noResult(node);
3014 }
3015
3016 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3017 {
3018     ASSERT(isFloat(type));
3019     
3020     SpeculateCellOperand base(this, node->child1());
3021     SpeculateStrictInt32Operand property(this, node->child2());
3022     StorageOperand storage(this, node->child3());
3023
3024     GPRReg baseReg = base.gpr();
3025     GPRReg propertyReg = property.gpr();
3026     GPRReg storageReg = storage.gpr();
3027
3028     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
3029
3030     FPRTemporary result(this);
3031     FPRReg resultReg = result.fpr();
3032     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3033     switch (elementSize(type)) {
3034     case 4:
3035         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3036         m_jit.convertFloatToDouble(resultReg, resultReg);
3037         break;
3038     case 8: {
3039         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3040         break;
3041     }
3042     default:
3043         RELEASE_ASSERT_NOT_REACHED();
3044     }
3045     
3046     doubleResult(resultReg, node);
3047 }
3048
3049 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3050 {
3051     ASSERT(isFloat(type));
3052     
3053     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3054     GPRReg storageReg = storage.gpr();
3055     
3056     Edge baseUse = m_jit.graph().varArgChild(node, 0);
3057     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3058
3059     SpeculateDoubleOperand valueOp(this, valueUse);
3060     FPRTemporary scratch(this);
3061     FPRReg valueFPR = valueOp.fpr();
3062     FPRReg scratchFPR = scratch.fpr();
3063
3064     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3065     
3066     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3067     
3068     switch (elementSize(type)) {
3069     case 4: {
3070         m_jit.moveDouble(valueFPR, scratchFPR);
3071         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3072         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3073         break;
3074     }
3075     case 8:
3076         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3077         break;
3078     default:
3079         RELEASE_ASSERT_NOT_REACHED();
3080     }
3081
3082     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3083     if (done.isSet())
3084         done.link(&m_jit);
3085     noResult(node);
3086 }
3087
3088 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
3089 {
3090     // Check that prototype is an object.
3091     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3092     
3093     // Initialize scratchReg with the value being checked.
3094     m_jit.move(valueReg, scratchReg);
3095     
3096     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3097     MacroAssembler::Label loop(&m_jit);
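         // A ProxyObject can observe or override the prototype lookup, so fall back to the generic
         // operationDefaultHasInstance path as soon as one shows up in the chain.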
3098     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3099         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3100     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
3101     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
3102     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3103 #if USE(JSVALUE64)
3104     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3105 #else
3106     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3107 #endif
3108     
3109     // No match - result is false.
3110 #if USE(JSVALUE64)
3111     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
3112 #else
3113     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
3114 #endif
3115     MacroAssembler::JumpList doneJumps; 
3116     doneJumps.append(m_jit.jump());
3117
3118     performDefaultHasInstance.link(&m_jit);
3119     silentSpillAllRegisters(scratchReg);
3120     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
3121     silentFillAllRegisters(scratchReg);
3122     m_jit.exceptionCheck();
3123 #if USE(JSVALUE64)
3124     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
3125 #endif
3126     doneJumps.append(m_jit.jump());
3127     
3128     isInstance.link(&m_jit);
3129 #if USE(JSVALUE64)
3130     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
3131 #else
3132     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
3133 #endif
3134     
3135     doneJumps.link(&m_jit);
3136 }
3137
3138 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
3139 {
3140     SpeculateCellOperand base(this, node->child1());
3141
3142     GPRReg baseGPR = base.gpr();
3143
3144     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
3145
3146     noResult(node);
3147 }
3148
3149 void SpeculativeJIT::compileInstanceOf(Node* node)
3150 {
3151     if (node->child1().useKind() == UntypedUse) {
3152         // It might not be a cell. Speculate less aggressively.
3153         // Or: it might only be used once (i.e. by us), so we get zero benefit
3154         // from speculating any more aggressively than we absolutely need to.
3155         
3156         JSValueOperand value(this, node->child1());
3157         SpeculateCellOperand prototype(this, node->child2());
3158         GPRTemporary scratch(this);
3159         GPRTemporary scratch2(this);
3160         
3161         GPRReg prototypeReg = prototype.gpr();
3162         GPRReg scratchReg = scratch.gpr();
3163         GPRReg scratch2Reg = scratch2.gpr();
3164         
3165         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
3166         GPRReg valueReg = value.jsValueRegs().payloadGPR();
3167         moveFalseTo(scratchReg);
3168
3169         MacroAssembler::Jump done = m_jit.jump();
3170         
3171         isCell.link(&m_jit);
3172         
3173         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3174         
3175         done.link(&m_jit);
3176
3177         blessedBooleanResult(scratchReg, node);
3178         return;
3179     }
3180     
3181     SpeculateCellOperand value(this, node->child1());
3182     SpeculateCellOperand prototype(this, node->child2());
3183     
3184     GPRTemporary scratch(this);
3185     GPRTemporary scratch2(this);
3186     
3187     GPRReg valueReg = value.gpr();
3188     GPRReg prototypeReg = prototype.gpr();
3189     GPRReg scratchReg = scratch.gpr();
3190     GPRReg scratch2Reg = scratch2.gpr();
3191     
3192     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3193
3194     blessedBooleanResult(scratchReg, node);
3195 }
3196
3197 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3198 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3199 {
3200     Edge& leftChild = node->child1();
3201     Edge& rightChild = node->child2();
3202
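         // If either operand is known not to be a number, the snippet fast path can never succeed,
         // so flush and call the slow-path operation directly.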
3203     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3204         JSValueOperand left(this, leftChild);
3205         JSValueOperand right(this, rightChild);
3206         JSValueRegs leftRegs = left.jsValueRegs();
3207         JSValueRegs rightRegs = right.jsValueRegs();
3208 #if USE(JSVALUE64)
3209         GPRTemporary result(this);
3210         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3211 #else
3212         GPRTemporary resultTag(this);
3213         GPRTemporary resultPayload(this);
3214         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3215 #endif
3216         flushRegisters();
3217         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3218         m_jit.exceptionCheck();
3219
3220         jsValueResult(resultRegs, node);
3221         return;
3222     }
3223
3224     Optional<JSValueOperand> left;
3225     Optional<JSValueOperand> right;
3226
3227     JSValueRegs leftRegs;
3228     JSValueRegs rightRegs;
3229
3230 #if USE(JSVALUE64)
3231     GPRTemporary result(this);
3232     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3233     GPRTemporary scratch(this);
3234     GPRReg scratchGPR = scratch.gpr();
3235 #else
3236     GPRTemporary resultTag(this);
3237     GPRTemporary resultPayload(this);
3238     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3239     GPRReg scratchGPR = resultTag.gpr();
3240 #endif
3241
3242     SnippetOperand leftOperand;
3243     SnippetOperand rightOperand;
3244
3245     // The snippet generator does not support both operands being constant. If the left
3246     // operand is already const, we'll ignore the right operand's constness.
3247     if (leftChild->isInt32Constant())
3248         leftOperand.setConstInt32(leftChild->asInt32());
3249     else if (rightChild->isInt32Constant())
3250         rightOperand.setConstInt32(rightChild->asInt32());
3251
3252     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3253
3254     if (!leftOperand.isConst()) {
3255         left = JSValueOperand(this, leftChild);
3256         leftRegs = left->jsValueRegs();
3257     }
3258     if (!rightOperand.isConst()) {
3259         right = JSValueOperand(this, rightChild);
3260         rightRegs = right->jsValueRegs();
3261     }
3262
3263     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3264     gen.generateFastPath(m_jit);
3265
3266     ASSERT(gen.didEmitFastPath());
3267     gen.endJumpList().append(m_jit.jump());
3268
3269     gen.slowPathJumpList().link(&m_jit);
3270     silentSpillAllRegisters(resultRegs);
3271
3272     if (leftOperand.isConst()) {
3273         leftRegs = resultRegs;
3274         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3275     } else if (rightOperand.isConst()) {
3276         rightRegs = resultRegs;
3277         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3278     }
3279
3280     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3281
3282     silentFillAllRegisters(resultRegs);
3283     m_jit.exceptionCheck();
3284
3285     gen.endJumpList().link(&m_jit);
3286     jsValueResult(resultRegs, node);
3287 }
3288
3289 void SpeculativeJIT::compileBitwiseOp(Node* node)
3290 {
3291     NodeType op = node->op();
3292     Edge& leftChild = node->child1();
3293     Edge& rightChild = node->child2();
3294
3295     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3296         switch (op) {
3297         case BitAnd:
3298             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3299             return;
3300         case BitOr:
3301             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3302             return;
3303         case BitXor:
3304             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3305             return;
3306         default:
3307             RELEASE_ASSERT_NOT_REACHED();
3308         }
3309     }
3310
3311     if (leftChild->isInt32Constant()) {
3312         SpeculateInt32Operand op2(this, rightChild);
3313         GPRTemporary result(this, Reuse, op2);
3314
3315         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3316
3317         int32Result(result.gpr(), node);
3318
3319     } else if (rightChild->isInt32Constant()) {
3320         SpeculateInt32Operand op1(this, leftChild);
3321         GPRTemporary result(this, Reuse, op1);
3322
3323         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3324
3325         int32Result(result.gpr(), node);
3326
3327     } else {
3328         SpeculateInt32Operand op1(this, leftChild);
3329         SpeculateInt32Operand op2(this, rightChild);
3330         GPRTemporary result(this, Reuse, op1, op2);
3331         
3332         GPRReg reg1 = op1.gpr();
3333         GPRReg reg2 = op2.gpr();
3334         bitOp(op, reg1, reg2, result.gpr());
3335         
3336         int32Result(result.gpr(), node);
3337     }
3338 }
3339
3340 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3341 {
3342     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3343         ? operationValueBitRShift : operationValueBitURShift;
3344     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3345         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3346
3347     Edge& leftChild = node->child1();
3348     Edge& rightChild = node->child2();
3349
3350     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3351         JSValueOperand left(this, leftChild);
3352         JSValueOperand right(this, rightChild);
3353         JSValueRegs leftRegs = left.jsValueRegs();
3354         JSValueRegs rightRegs = right.jsValueRegs();
3355 #if USE(JSVALUE64)
3356         GPRTemporary result(this);
3357         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3358 #else
3359         GPRTemporary resultTag(this);
3360         GPRTemporary resultPayload(this);
3361         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3362 #endif
3363         flushRegisters();
3364         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3365         m_jit.exceptionCheck();
3366
3367         jsValueResult(resultRegs, node);
3368         return;
3369     }
3370
3371     Optional<JSValueOperand> left;
3372     Optional<JSValueOperand> right;
3373
3374     JSValueRegs leftRegs;
3375     JSValueRegs rightRegs;
3376
3377     FPRTemporary leftNumber(this);
3378     FPRReg leftFPR = leftNumber.fpr();
3379
3380 #if USE(JSVALUE64)
3381     GPRTemporary result(this);
3382     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3383     GPRTemporary scratch(this);
3384     GPRReg scratchGPR = scratch.gpr();
3385     FPRReg scratchFPR = InvalidFPRReg;
3386 #else
3387     GPRTemporary resultTag(this);
3388     GPRTemporary resultPayload(this);
3389     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3390     GPRReg scratchGPR = resultTag.gpr();
3391     FPRTemporary fprScratch(this);
3392     FPRReg scratchFPR = fprScratch.fpr();
3393 #endif
3394
3395     SnippetOperand leftOperand;
3396     SnippetOperand rightOperand;
3397
3398     // The snippet generator does not support both operands being constant. If the left
3399     // operand is already const, we'll ignore the right operand's constness.
3400     if (leftChild->isInt32Constant())
3401         leftOperand.setConstInt32(leftChild->asInt32());
3402     else if (rightChild->isInt32Constant())
3403         rightOperand.setConstInt32(rightChild->asInt32());
3404
3405     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3406
3407     if (!leftOperand.isConst()) {
3408         left = JSValueOperand(this, leftChild);
3409         leftRegs = left->jsValueRegs();
3410     }
3411     if (!rightOperand.isConst()) {
3412         right = JSValueOperand(this, rightChild);
3413         rightRegs = right->jsValueRegs();
3414     }
3415
3416     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3417         leftFPR, scratchGPR, scratchFPR, shiftType);
3418     gen.generateFastPath(m_jit);
3419
3420     ASSERT(gen.didEmitFastPath());
3421     gen.endJumpList().append(m_jit.jump());
3422
3423     gen.slowPathJumpList().link(&m_jit);
3424     silentSpillAllRegisters(resultRegs);
3425
3426     if (leftOperand.isConst()) {
3427         leftRegs = resultRegs;
3428         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3429     } else if (rightOperand.isConst()) {
3430         rightRegs = resultRegs;
3431         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3432     }
3433
3434     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3435
3436     silentFillAllRegisters(resultRegs);
3437     m_jit.exceptionCheck();
3438
3439     gen.endJumpList().link(&m_jit);
3440     jsValueResult(resultRegs, node);
3441     return;
3442 }
3443
3444 void SpeculativeJIT::compileShiftOp(Node* node)
3445 {
3446     NodeType op = node->op();
3447     Edge& leftChild = node->child1();
3448     Edge& rightChild = node->child2();
3449
3450     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3451         switch (op) {
3452         case BitLShift:
3453             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3454             return;
3455         case BitRShift:
3456         case BitURShift:
3457             emitUntypedRightShiftBitOp(node);
3458             return;
3459         default:
3460             RELEASE_ASSERT_NOT_REACHED();
3461         }
3462     }
3463
3464     if (rightChild->isInt32Constant()) {
3465         SpeculateInt32Operand op1(this, leftChild);
3466         GPRTemporary result(this, Reuse, op1);
3467
3468         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3469
3470         int32Result(result.gpr(), node);
3471     } else {
3472         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3473         SpeculateInt32Operand op1(this, leftChild);
3474         SpeculateInt32Operand op2(this, rightChild);
3475         GPRTemporary result(this, Reuse, op1);
3476
3477         GPRReg reg1 = op1.gpr();
3478         GPRReg reg2 = op2.gpr();
3479         shiftOp(op, reg1, reg2, result.gpr());
3480
3481         int32Result(result.gpr(), node);
3482     }
3483 }
3484
3485 void SpeculativeJIT::compileValueAdd(Node* node)
3486 {
3487     Edge& leftChild = node->child1();
3488     Edge& rightChild = node->child2();
3489
3490     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3491         JSValueOperand left(this, leftChild);
3492         JSValueOperand right(this, rightChild);
3493         JSValueRegs leftRegs = left.jsValueRegs();
3494         JSValueRegs rightRegs = right.jsValueRegs();
3495 #if USE(JSVALUE64)
3496         GPRTemporary result(this);
3497         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3498 #else
3499         GPRTemporary resultTag(this);
3500         GPRTemporary resultPayload(this);
3501         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3502 #endif
3503         flushRegisters();
3504         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3505         m_jit.exceptionCheck();
3506     
3507         jsValueResult(resultRegs, node);
3508         return;
3509     }
3510
3511 #if USE(JSVALUE64)
3512     bool needsScratchGPRReg = true;
3513     bool needsScratchFPRReg = false;
3514 #else
3515     bool needsScratchGPRReg = true;
3516     bool needsScratchFPRReg = true;
3517 #endif
3518
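         // Seed the add IC with the baseline ArithProfile for this bytecode; the repatching slow path
         // (operationValueAddOptimize) can then regenerate the inline code as new operand types are seen.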
3519     ArithProfile* arithProfile = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
3520     JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile);
3521     auto repatchingFunction = operationValueAddOptimize;
3522     auto nonRepatchingFunction = operationValueAdd;
3523     
3524     compileMathIC(node, addIC, needsScratchGPRReg, needsScratchFPRReg, repatchingFunction, nonRepatchingFunction);
3525 }
3526
3527 template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
3528 void SpeculativeJIT::compileMathIC(Node* node, JITBinaryMathIC<Generator>* mathIC, bool needsScratchGPRReg, bool needsScratchFPRReg, RepatchingFunction repatchingFunction, NonRepatchingFunction nonRepatchingFunction)
3529 {
3530     Edge& leftChild = node->child1();
3531     Edge& rightChild = node->child2();
3532
3533     Optional<JSValueOperand> left;
3534     Optional<JSValueOperand> right;
3535
3536     JSValueRegs leftRegs;
3537     JSValueRegs rightRegs;
3538
3539     FPRTemporary leftNumber(this);
3540     FPRTemporary rightNumber(this);
3541     FPRReg leftFPR = leftNumber.fpr();
3542     FPRReg rightFPR = rightNumber.fpr();
3543
3544     GPRReg scratchGPR = InvalidGPRReg;
3545     FPRReg scratchFPR = InvalidFPRReg;
3546
3547     Optional<FPRTemporary> fprScratch;
3548     if (needsScratchFPRReg) {
3549         fprScratch = FPRTemporary(this);
3550         scratchFPR = fprScratch->fpr();
3551     }
3552
3553 #if USE(JSVALUE64)
3554     Optional<GPRTemporary> gprScratch;
3555     if (needsScratchGPRReg) {
3556         gprScratch = GPRTemporary(this);
3557         scratchGPR = gprScratch->gpr();
3558     }
3559     GPRTemporary result(this);
3560     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3561 #else
3562     GPRTemporary resultTag(this);
3563     GPRTemporary resultPayload(this);
3564     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3565     if (needsScratchGPRReg)
3566         scratchGPR = resultRegs.tagGPR();
3567 #endif
3568
3569     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3570     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3571
3572     // The snippet generator does not support both operands being constant. If the left
3573     // operand is already const, we'll ignore the right operand's constness.
3574     if (leftChild->isInt32Constant())
3575         leftOperand.setConstInt32(leftChild->asInt32());
3576     else if (rightChild->isInt32Constant())
3577         rightOperand.setConstInt32(rightChild->asInt32());
3578
3579     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3580     ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));
3581
3582     if (!Generator::isLeftOperandValidConstant(leftOperand)) {
3583         left = JSValueOperand(this, leftChild);
3584         leftRegs = left->jsValueRegs();
3585     }
3586     if (!Generator::isRightOperandValidConstant(rightOperand)) {
3587         right = JSValueOperand(this, rightChild);
3588         rightRegs = right->jsValueRegs();
3589     }
3590
3591 #if ENABLE(MATH_IC_STATS)
3592     auto inlineStart = m_jit.label();
3593 #endif
3594
3595     Box<MathICGenerationState> addICGenerationState = Box<MathICGenerationState>::create();
3596     mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, leftFPR, rightFPR, scratchGPR, scratchFPR);
3597
3598     bool shouldEmitProfiling = false;
3599     bool generatedInline = mathIC->generateInline(m_jit, *addICGenerationState, shouldEmitProfiling);
3600
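         // If the IC generated an inline fast path, its slow-path jumps are linked to a lazily emitted
         // stub that spills registers, calls the repatching or non-repatching operation, and jumps back.
         // Otherwise everything goes through a plain call to the non-repatching operation.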
3601     if (generatedInline) {
3602         ASSERT(!addICGenerationState->slowPathJumps.empty());
3603
3604         Vector<SilentRegisterSavePlan> savePlans;
3605         silentSpillAllRegistersImpl(false, savePlans, resultRegs);
3606
3607         auto done = m_jit.label();
3608
3609         addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
3610             addICGenerationState->slowPathJumps.link(&m_jit);
3611             addICGenerationState->slowPathStart = m_jit.label();
3612 #if ENABLE(MATH_IC_STATS)
3613             auto slowPathStart = m_jit.label();
3614 #endif
3615
3616             silentSpill(savePlans);
3617
3618             auto innerLeftRegs = leftRegs;
3619             auto innerRightRegs = rightRegs;
3620             if (Generator::isLeftOperandValidConstant(leftOperand)) {
3621                 innerLeftRegs = resultRegs;
3622                 m_jit.moveValue(leftChild->asJSValue(), innerLeftRegs);
3623             } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3624                 innerRightRegs = resultRegs;
3625                 m_jit.moveValue(rightChild->asJSValue(), innerRightRegs);
3626             }
3627
3628             if (addICGenerationState->shouldSlowPathRepatch)
3629                 addICGenerationState->slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJMic>(repatchingFunction), resultRegs, innerLeftRegs, innerRightRegs, TrustedImmPtr(mathIC));
3630             else
3631                 addICGenerationState->slowPathCall = callOperation(nonRepatchingFunction, resultRegs, innerLeftRegs, innerRightRegs);
3632
3633             silentFill(savePlans);
3634             m_jit.exceptionCheck();
3635             m_jit.jump().linkTo(done, &m_jit);
3636
3637             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3638                 mathIC->finalizeInlineCode(*addICGenerationState, linkBuffer);
3639             });
3640
3641 #if ENABLE(MATH_IC_STATS)
3642             auto slowPathEnd = m_jit.label();
3643             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3644                 size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
3645                 mathIC->m_generatedCodeSize += size;
3646             });
3647 #endif
3648
3649         });
3650     } else {
3651         if (Generator::isLeftOperandValidConstant(leftOperand)) {
3652             left = JSValueOperand(this, leftChild);
3653             leftRegs = left->jsValueRegs();
3654         } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3655             right = JSValueOperand(this, rightChild);
3656             rightRegs = right->jsValueRegs();
3657         }
3658
3659         flushRegisters();
3660         callOperation(nonRepatchingFunction, resultRegs, leftRegs, rightRegs);
3661         m_jit.exceptionCheck();
3662     }
3663
3664 #if ENABLE(MATH_IC_STATS)
3665     auto inlineEnd = m_jit.label();
3666     m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3667         size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
3668         mathIC->m_generatedCodeSize += size;
3669     });
3670 #endif
3671
3672     jsValueResult(resultRegs, node);
3673     return;
3674 }
3675
3676 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3677 {
3678     // We could do something smarter here, but this case is currently super rare and, unless
3679     // Symbol.hasInstance becomes popular, it will likely remain that way.
3680
3681     JSValueOperand value(this, node->child1());
3682     SpeculateCellOperand constructor(this, node->child2());
3683     JSValueOperand hasInstanceValue(this, node->child3());
3684     GPRTemporary result(this);
3685
3686     JSValueRegs valueRegs = value.jsValueRegs();
3687     GPRReg constructorGPR = constructor.gpr();
3688     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3689     GPRReg resultGPR = result.gpr();
3690
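         // No fast path is emitted: the unconditional jump below sends every execution to the
         // operationInstanceOfCustom slow path call.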
3691     MacroAssembler::Jump slowCase = m_jit.jump();
3692
3693     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3694
3695     unblessedBooleanResult(resultGPR, node);
3696 }
3697
3698 void SpeculativeJIT::compileIsCellWithType(Node* node)
3699 {
3700     switch (node->child1().useKind()) {
3701     case UntypedUse: {
3702         JSValueOperand value(this, node->child1());
3703 #if USE(JSVALUE64)
3704         GPRTemporary result(this, Reuse, value);
3705 #else
3706         GPRTemporary result(this, Reuse, value, PayloadWord);
3707 #endif
3708
3709         JSValueRegs valueRegs = value.jsValueRegs();
3710         GPRReg resultGPR = result.gpr();
3711
3712         JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3713
3714         m_jit.compare8(JITCompiler::Equal,
3715             JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3716             TrustedImm32(node->queriedType()),
3717             resultGPR);
3718         blessBoolean(resultGPR);
3719         JITCompiler::Jump done = m_jit.jump();
3720
3721         isNotCell.link(&m_jit);
3722         moveFalseTo(resultGPR);
3723
3724         done.link(&m_jit);
3725         blessedBooleanResult(resultGPR, node);
3726         return;
3727     }
3728
3729     case CellUse: {
3730         SpeculateCellOperand cell(this, node->child1());
3731         GPRTemporary result(this, Reuse, cell);
3732
3733         GPRReg cellGPR = cell.gpr();
3734         GPRReg resultGPR = result.gpr();
3735
3736         m_jit.compare8(JITCompiler::Equal,
3737             JITCompiler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
3738             TrustedImm32(node->queriedType()),
3739             resultGPR);
3740         blessBoolean(resultGPR);
3741         blessedBooleanResult(resultGPR, node);
3742         return;
3743     }
3744
3745     default:
3746         RELEASE_ASSERT_NOT_REACHED();
3747         break;
3748     }
3749 }
3750
3751 void SpeculativeJIT::compileIsTypedArrayView(Node* node)
3752 {
3753     JSValueOperand value(this, node->child1());
3754 #if USE(JSVALUE64)
3755     GPRTemporary result(this, Reuse, value);
3756 #else
3757     GPRTemporary result(this, Reuse, value, PayloadWord);
3758 #endif
3759
3760     JSValueRegs valueRegs = value.jsValueRegs();
3761     GPRReg resultGPR = result.gpr();
3762
3763     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3764
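         // Typed array view JSTypes form a contiguous range, so one unsigned compare of
         // (type - Int8ArrayType) against (Float64ArrayType - Int8ArrayType) covers all of them.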
3765     m_jit.load8(JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()), resultGPR);
3766     m_jit.sub32(TrustedImm32(Int8ArrayType), resultGPR);
3767     m_jit.compare32(JITCompiler::BelowOrEqual,
3768         resultGPR,
3769         TrustedImm32(Float64ArrayType - Int8ArrayType),
3770         resultGPR);
3771     blessBoolean(resultGPR);
3772     JITCompiler::Jump done = m_jit.jump();
3773
3774     isNotCell.link(&m_jit);
3775     moveFalseTo(resultGPR);
3776
3777     done.link(&m_jit);
3778     blessedBooleanResult(resultGPR, node);
3779 }
3780
3781 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3782 {
3783     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3784     JSValueOperand value(this, node->child1());
3785 #if USE(JSVALUE64)
3786     GPRTemporary result(this, Reuse, value);
3787 #else
3788     GPRTemporary result(this, Reuse, value, PayloadWord);
3789 #endif
3790
3791     JSValueRegs valueRegs = value.jsValueRegs();
3792     GPRReg resultGPR = result.gpr();
3793
3794     MacroAssembler::JumpList slowCases;
3795     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3796     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3797     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3798
3799     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3800     cellResult(resultGPR, node);
3801 }
3802
3803 void SpeculativeJIT::compileArithAdd(Node* node)
3804 {
3805     switch (node->binaryUseKind()) {
3806     case Int32Use: {
3807         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3808
3809         if (node->child2()->isInt32Constant()) {
3810             SpeculateInt32Operand op1(this, node->child1());
3811             GPRTemporary result(this, Reuse, op1);
3812
3813             GPRReg gpr1 = op1.gpr();
3814             int32_t imm2 = node->child2()->asInt32();
3815             GPRReg gprResult = result.gpr();
3816
3817             if (!shouldCheckOverflow(node->arithMode())) {
3818                 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3819                 int32Result(gprResult, node);
3820                 return;
3821             }
3822
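                 // branchAdd32 clobbers gpr1 when it doubles as the result register, so record a
                 // SpeculationRecovery that lets OSR exit undo the add and recover the original operand.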
3823             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
3824             if (gpr1 == gprResult) {
3825                 speculationCheck(Overflow, JSValueRegs(), 0, check,
3826                     SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
3827             } else
3828                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3829
3830             int32Result(gprResult, node);
3831             return;
3832         }
3833                 
3834         SpeculateInt32Operand op1(this, node->child1());
3835         SpeculateInt32Operand op2(this, node->child2());
3836         GPRTemporary result(this, Reuse, op1, op2);
3837
3838         GPRReg gpr1 = op1.gpr();
3839         GPRReg gpr2 = op2.gpr();
3840         GPRReg gprResult = result.gpr();
3841
3842         if (!shouldCheckOverflow(node->arithMode()))
3843             m_jit.add32(gpr1, gpr2, gprResult);
3844         else {
3845             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3846                 
3847             if (gpr1 == gprResult)
3848                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3849             else if (gpr2 == gprResult)
3850                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3851             else
3852                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3853         }
3854
3855         int32Result(gprResult, node);
3856         return;
3857     }
3858         
3859 #if USE(JSVALUE64)
3860     case Int52RepUse: {
3861         ASSERT(shouldCheckOverflow(node->arithMode()));
3862         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3863
3864         // Will we need an overflow check? If we can prove that neither input can be
3865         // Int52, then the overflow check will not be necessary.
3866         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3867             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3868             SpeculateWhicheverInt52Operand op1(this, node->child1());
3869             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3870             GPRTemporary result(this, Reuse, op1);
3871             m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
3872             int52Result(result.gpr(), node, op1.format());
3873             return;
3874         }
3875         
3876         SpeculateInt52Operand op1(this, node->child1());
3877         SpeculateInt52Operand op2(this, node->child2());
3878         GPRTemporary result(this);
3879         m_jit.move(op1.gpr(), result.gpr());
3880         speculationCheck(
3881             Int52Overflow, JSValueRegs(), 0,
3882             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3883         int52Result(result.gpr(), node);
3884         return;
3885     }
3886 #endif // USE(JSVALUE64)
3887     
3888