1 /*
2  * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGCapabilities.h"
37 #include "DFGDOMJITPatchpointParams.h"
38 #include "DFGMayExit.h"
39 #include "DFGOSRExitFuzz.h"
40 #include "DFGSaneStringGetByValSlowPathGenerator.h"
41 #include "DFGSlowPathGenerator.h"
42 #include "DOMJITPatchpoint.h"
43 #include "DirectArguments.h"
44 #include "JITAddGenerator.h"
45 #include "JITBitAndGenerator.h"
46 #include "JITBitOrGenerator.h"
47 #include "JITBitXorGenerator.h"
48 #include "JITDivGenerator.h"
49 #include "JITLeftShiftGenerator.h"
50 #include "JITMulGenerator.h"
51 #include "JITRightShiftGenerator.h"
52 #include "JITSubGenerator.h"
53 #include "JSCInlines.h"
54 #include "JSEnvironmentRecord.h"
55 #include "JSFixedArray.h"
56 #include "JSGeneratorFunction.h"
57 #include "JSLexicalEnvironment.h"
58 #include "LinkBuffer.h"
59 #include "RegExpConstructor.h"
60 #include "ScopedArguments.h"
61 #include "ScratchRegisterAllocator.h"
62 #include "WriteBarrierBuffer.h"
63 #include <wtf/BitVector.h>
64 #include <wtf/Box.h>
65 #include <wtf/MathExtras.h>
66
67 namespace JSC { namespace DFG {
68
69 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
70     : m_compileOkay(true)
71     , m_jit(jit)
72     , m_currentNode(0)
73     , m_lastGeneratedNode(LastNodeType)
74     , m_indexInBlock(0)
75     , m_generationInfo(m_jit.graph().frameRegisterCount())
76     , m_state(m_jit.graph())
77     , m_interpreter(m_jit.graph(), m_state)
78     , m_stream(&jit.jitCode()->variableEventStream)
79     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
80 {
81 }
82
83 SpeculativeJIT::~SpeculativeJIT()
84 {
85 }
86
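// Inline-allocates a bare JSFinalObject and, if it needs out-of-line or indexed storage, its butterfly.
// Slow cases fall back to operationNewRawObject, and any vector slots beyond numElements are filled with holes.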
87 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
88 {
89     IndexingType indexingType = structure->indexingType();
90     bool hasIndexingHeader = hasIndexedProperties(indexingType);
91
92     unsigned inlineCapacity = structure->inlineCapacity();
93     unsigned outOfLineCapacity = structure->outOfLineCapacity();
94     
95     GPRTemporary scratch(this);
96     GPRTemporary scratch2(this);
97     GPRReg scratchGPR = scratch.gpr();
98     GPRReg scratch2GPR = scratch2.gpr();
99
100     ASSERT(vectorLength >= numElements);
101     vectorLength = Butterfly::optimalContiguousVectorLength(structure, vectorLength);
102     
103     JITCompiler::JumpList slowCases;
104
105     size_t size = 0;
106     if (hasIndexingHeader)
107         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
108     size += outOfLineCapacity * sizeof(JSValue);
109
110     m_jit.move(TrustedImmPtr(0), storageGPR);
111     
112     if (size) {
113         if (MarkedAllocator* allocator = m_jit.vm()->heap.allocatorForAuxiliaryData(size)) {
114             m_jit.move(TrustedImmPtr(allocator), scratchGPR);
115             m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);
116             
117             m_jit.addPtr(
118                 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
119                 storageGPR);
120             
121             if (hasIndexingHeader)
122                 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
123         } else
124             slowCases.append(m_jit.jump());
125     }
126
127     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
128     MarkedAllocator* allocatorPtr = m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
129     if (allocatorPtr) {
130         m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
131         emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
132     } else
133         slowCases.append(m_jit.jump());
134
135     // I want a slow path that also loads out the storage pointer, and that's
136     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
137     // of work for a very small piece of functionality. :-/
138     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
139         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
140         structure, vectorLength));
141
142     if (numElements < vectorLength) {
143 #if USE(JSVALUE64)
144         if (hasDouble(structure->indexingType()))
145             m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
146         else
147             m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
148         for (unsigned i = numElements; i < vectorLength; ++i)
149             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
150 #else
151         EncodedValueDescriptor value;
152         if (hasDouble(structure->indexingType()))
153             value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
154         else
155             value.asInt64 = JSValue::encode(JSValue());
156         for (unsigned i = numElements; i < vectorLength; ++i) {
157             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
158             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
159         }
160 #endif
161     }
162     
163     if (hasIndexingHeader)
164         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
165 }
166
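// Loads the call's argument count into lengthGPR. For a non-varargs inline call frame the count is a
// compile-time constant; otherwise it is loaded from the frame's argument count slot. Unless includeThis
// is set, |this| is not counted.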
167 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
168 {
169     if (inlineCallFrame && !inlineCallFrame->isVarargs())
170         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
171     else {
172         VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
173         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
174         if (!includeThis)
175             m_jit.sub32(TrustedImm32(1), lengthGPR);
176     }
177 }
178
179 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
180 {
181     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
182 }
183
184 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
185 {
186     if (origin.inlineCallFrame) {
187         if (origin.inlineCallFrame->isClosureCall) {
188             m_jit.loadPtr(
189                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
190                 calleeGPR);
191         } else {
192             m_jit.move(
193                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
194                 calleeGPR);
195         }
196     } else
197         m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
198 }
199
200 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
201 {
202     m_jit.addPtr(
203         TrustedImm32(
204             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
205         GPRInfo::callFrameRegister, startGPR);
206 }
207
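// OSR exit fuzzing: bumps the global count of executed checks and returns a jump that forces an exit once
// the configured fireOSRExitFuzzAt/AtOrAfter threshold is reached. Returns an unset jump when fuzzing is off.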
208 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
209 {
210     if (!Options::useOSRExitFuzz()
211         || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
212         || !doOSRExitFuzzing())
213         return MacroAssembler::Jump();
214     
215     MacroAssembler::Jump result;
216     
217     m_jit.pushToSave(GPRInfo::regT0);
218     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
219     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
220     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
221     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
222     unsigned at = Options::fireOSRExitFuzzAt();
223     if (at || atOrAfter) {
224         unsigned threshold;
225         MacroAssembler::RelationalCondition condition;
226         if (atOrAfter) {
227             threshold = atOrAfter;
228             condition = MacroAssembler::Below;
229         } else {
230             threshold = at;
231             condition = MacroAssembler::NotEqual;
232         }
233         MacroAssembler::Jump ok = m_jit.branch32(
234             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
235         m_jit.popToRestore(GPRInfo::regT0);
236         result = m_jit.jump();
237         ok.link(&m_jit);
238     }
239     m_jit.popToRestore(GPRInfo::regT0);
240     
241     return result;
242 }
243
244 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
245 {
246     if (!m_compileOkay)
247         return;
248     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
249     if (fuzzJump.isSet()) {
250         JITCompiler::JumpList jumpsToFail;
251         jumpsToFail.append(fuzzJump);
252         jumpsToFail.append(jumpToFail);
253         m_jit.appendExitInfo(jumpsToFail);
254     } else
255         m_jit.appendExitInfo(jumpToFail);
256     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
257 }
258
259 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
260 {
261     if (!m_compileOkay)
262         return;
263     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
264     if (fuzzJump.isSet()) {
265         JITCompiler::JumpList myJumpsToFail;
266         myJumpsToFail.append(jumpsToFail);
267         myJumpsToFail.append(fuzzJump);
268         m_jit.appendExitInfo(myJumpsToFail);
269     } else
270         m_jit.appendExitInfo(jumpsToFail);
271     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
272 }
273
274 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
275 {
276     if (!m_compileOkay)
277         return OSRExitJumpPlaceholder();
278     unsigned index = m_jit.jitCode()->osrExit.size();
279     m_jit.appendExitInfo();
280     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
281     return OSRExitJumpPlaceholder(index);
282 }
283
284 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
285 {
286     return speculationCheck(kind, jsValueSource, nodeUse.node());
287 }
288
289 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
290 {
291     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
292 }
293
294 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
295 {
296     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
297 }
298
299 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
300 {
301     if (!m_compileOkay)
302         return;
303     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
304     m_jit.appendExitInfo(jumpToFail);
305     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
306 }
307
308 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
309 {
310     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
311 }
312
313 void SpeculativeJIT::emitInvalidationPoint(Node* node)
314 {
315     if (!m_compileOkay)
316         return;
317     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
318     m_jit.jitCode()->appendOSRExit(OSRExit(
319         UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
320         this, m_stream->size()));
321     info.m_replacementSource = m_jit.watchpointLabel();
322     ASSERT(info.m_replacementSource.isSet());
323     noResult(node);
324 }
325
326 void SpeculativeJIT::unreachable(Node* node)
327 {
328     m_compileOkay = false;
329     m_jit.abortWithReason(DFGUnreachableNode, node->op());
330 }
331
332 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
333 {
334     if (!m_compileOkay)
335         return;
336     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
337     m_compileOkay = false;
338     if (verboseCompilationEnabled())
339         dataLog("Bailing compilation.\n");
340 }
341
342 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
343 {
344     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
345 }
346
347 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
348 {
349     ASSERT(needsTypeCheck(edge, typesPassedThrough));
350     m_interpreter.filter(edge, typesPassedThrough);
351     speculationCheck(exitKind, source, edge.node(), jumpToFail);
352 }
353
354 RegisterSet SpeculativeJIT::usedRegisters()
355 {
356     RegisterSet result;
357     
358     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
359         GPRReg gpr = GPRInfo::toRegister(i);
360         if (m_gprs.isInUse(gpr))
361             result.set(gpr);
362     }
363     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
364         FPRReg fpr = FPRInfo::toRegister(i);
365         if (m_fprs.isInUse(fpr))
366             result.set(fpr);
367     }
368     
369     result.merge(RegisterSet::stubUnavailableRegisters());
370     
371     return result;
372 }
373
374 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
375 {
376     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
377 }
378
379 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
380 {
381     m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
382 }
383
384 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
385 {
386     for (auto& slowPathGenerator : m_slowPathGenerators) {
387         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
388         slowPathGenerator->generate(this);
389     }
390     for (auto& slowPathLambda : m_slowPathLambdas) {
391         Node* currentNode = slowPathLambda.currentNode;
392         m_currentNode = currentNode;
393         m_outOfLineStreamIndex = slowPathLambda.streamIndex;
394         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
395         slowPathLambda.generator();
396         m_outOfLineStreamIndex = Nullopt;
397     }
398 }
399
400 void SpeculativeJIT::clearGenerationInfo()
401 {
402     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
403         m_generationInfo[i] = GenerationInfo();
404     m_gprs = RegisterBank<GPRInfo>();
405     m_fprs = RegisterBank<FPRInfo>();
406 }
407
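// Builds the plan for silently spilling the value that currently lives in 'source' (belonging to virtual
// register 'spillMe') around a call, and for refilling it afterwards, without disturbing the recorded
// generation info.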
408 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
409 {
410     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
411     Node* node = info.node();
412     DataFormat registerFormat = info.registerFormat();
413     ASSERT(registerFormat != DataFormatNone);
414     ASSERT(registerFormat != DataFormatDouble);
415         
416     SilentSpillAction spillAction;
417     SilentFillAction fillAction;
418         
419     if (!info.needsSpill())
420         spillAction = DoNothingForSpill;
421     else {
422 #if USE(JSVALUE64)
423         ASSERT(info.gpr() == source);
424         if (registerFormat == DataFormatInt32)
425             spillAction = Store32Payload;
426         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
427             spillAction = StorePtr;
428         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
429             spillAction = Store64;
430         else {
431             ASSERT(registerFormat & DataFormatJS);
432             spillAction = Store64;
433         }
434 #elif USE(JSVALUE32_64)
435         if (registerFormat & DataFormatJS) {
436             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
437             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
438         } else {
439             ASSERT(info.gpr() == source);
440             spillAction = Store32Payload;
441         }
442 #endif
443     }
444         
445     if (registerFormat == DataFormatInt32) {
446         ASSERT(info.gpr() == source);
447         ASSERT(isJSInt32(info.registerFormat()));
448         if (node->hasConstant()) {
449             ASSERT(node->isInt32Constant());
450             fillAction = SetInt32Constant;
451         } else
452             fillAction = Load32Payload;
453     } else if (registerFormat == DataFormatBoolean) {
454 #if USE(JSVALUE64)
455         RELEASE_ASSERT_NOT_REACHED();
456 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
457         fillAction = DoNothingForFill;
458 #endif
459 #elif USE(JSVALUE32_64)
460         ASSERT(info.gpr() == source);
461         if (node->hasConstant()) {
462             ASSERT(node->isBooleanConstant());
463             fillAction = SetBooleanConstant;
464         } else
465             fillAction = Load32Payload;
466 #endif
467     } else if (registerFormat == DataFormatCell) {
468         ASSERT(info.gpr() == source);
469         if (node->hasConstant()) {
470             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
471             node->asCell(); // To get the assertion.
472             fillAction = SetCellConstant;
473         } else {
474 #if USE(JSVALUE64)
475             fillAction = LoadPtr;
476 #else
477             fillAction = Load32Payload;
478 #endif
479         }
480     } else if (registerFormat == DataFormatStorage) {
481         ASSERT(info.gpr() == source);
482         fillAction = LoadPtr;
483     } else if (registerFormat == DataFormatInt52) {
484         if (node->hasConstant())
485             fillAction = SetInt52Constant;
486         else if (info.spillFormat() == DataFormatInt52)
487             fillAction = Load64;
488         else if (info.spillFormat() == DataFormatStrictInt52)
489             fillAction = Load64ShiftInt52Left;
490         else if (info.spillFormat() == DataFormatNone)
491             fillAction = Load64;
492         else {
493             RELEASE_ASSERT_NOT_REACHED();
494 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
495             fillAction = Load64; // Make GCC happy.
496 #endif
497         }
498     } else if (registerFormat == DataFormatStrictInt52) {
499         if (node->hasConstant())
500             fillAction = SetStrictInt52Constant;
501         else if (info.spillFormat() == DataFormatInt52)
502             fillAction = Load64ShiftInt52Right;
503         else if (info.spillFormat() == DataFormatStrictInt52)
504             fillAction = Load64;
505         else if (info.spillFormat() == DataFormatNone)
506             fillAction = Load64;
507         else {
508             RELEASE_ASSERT_NOT_REACHED();
509 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
510             fillAction = Load64; // Make GCC happy.
511 #endif
512         }
513     } else {
514         ASSERT(registerFormat & DataFormatJS);
515 #if USE(JSVALUE64)
516         ASSERT(info.gpr() == source);
517         if (node->hasConstant()) {
518             if (node->isCellConstant())
519                 fillAction = SetTrustedJSConstant;
520             else
521                 fillAction = SetJSConstant;
522         } else if (info.spillFormat() == DataFormatInt32) {
523             ASSERT(registerFormat == DataFormatJSInt32);
524             fillAction = Load32PayloadBoxInt;
525         } else
526             fillAction = Load64;
527 #else
528         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
529         if (node->hasConstant())
530             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
531         else if (info.payloadGPR() == source)
532             fillAction = Load32Payload;
533         else { // Fill the Tag
534             switch (info.spillFormat()) {
535             case DataFormatInt32:
536                 ASSERT(registerFormat == DataFormatJSInt32);
537                 fillAction = SetInt32Tag;
538                 break;
539             case DataFormatCell:
540                 ASSERT(registerFormat == DataFormatJSCell);
541                 fillAction = SetCellTag;
542                 break;
543             case DataFormatBoolean:
544                 ASSERT(registerFormat == DataFormatJSBoolean);
545                 fillAction = SetBooleanTag;
546                 break;
547             default:
548                 fillAction = Load32Tag;
549                 break;
550             }
551         }
552 #endif
553     }
554         
555     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
556 }
557     
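// As above, but for a value held in a floating-point register (always DataFormatDouble).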
558 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
559 {
560     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
561     Node* node = info.node();
562     ASSERT(info.registerFormat() == DataFormatDouble);
563
564     SilentSpillAction spillAction;
565     SilentFillAction fillAction;
566         
567     if (!info.needsSpill())
568         spillAction = DoNothingForSpill;
569     else {
570         ASSERT(!node->hasConstant());
571         ASSERT(info.spillFormat() == DataFormatNone);
572         ASSERT(info.fpr() == source);
573         spillAction = StoreDouble;
574     }
575         
576 #if USE(JSVALUE64)
577     if (node->hasConstant()) {
578         node->asNumber(); // To get the assertion.
579         fillAction = SetDoubleConstant;
580     } else {
581         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
582         fillAction = LoadDouble;
583     }
584 #elif USE(JSVALUE32_64)
585     ASSERT(info.registerFormat() == DataFormatDouble);
586     if (node->hasConstant()) {
587         node->asNumber(); // To get the assertion.
588         fillAction = SetDoubleConstant;
589     } else
590         fillAction = LoadDouble;
591 #endif
592
593     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
594 }
595     
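// Emits the store selected by the plan's spill action, if any.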
596 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
597 {
598     switch (plan.spillAction()) {
599     case DoNothingForSpill:
600         break;
601     case Store32Tag:
602         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
603         break;
604     case Store32Payload:
605         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
606         break;
607     case StorePtr:
608         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
609         break;
610 #if USE(JSVALUE64)
611     case Store64:
612         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
613         break;
614 #endif
615     case StoreDouble:
616         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
617         break;
618     default:
619         RELEASE_ASSERT_NOT_REACHED();
620     }
621 }
622     
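// Emits the reload or constant rematerialization selected by the plan's fill action. 'canTrample' is a
// scratch GPR that may be clobbered when materializing double constants on 64-bit.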
623 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
624 {
625 #if USE(JSVALUE32_64)
626     UNUSED_PARAM(canTrample);
627 #endif
628     switch (plan.fillAction()) {
629     case DoNothingForFill:
630         break;
631     case SetInt32Constant:
632         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
633         break;
634 #if USE(JSVALUE64)
635     case SetInt52Constant:
636         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
637         break;
638     case SetStrictInt52Constant:
639         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
640         break;
641 #endif // USE(JSVALUE64)
642     case SetBooleanConstant:
643         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
644         break;
645     case SetCellConstant:
646         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
647         break;
648 #if USE(JSVALUE64)
649     case SetTrustedJSConstant:
650         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
651         break;
652     case SetJSConstant:
653         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
654         break;
655     case SetDoubleConstant:
656         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
657         m_jit.move64ToDouble(canTrample, plan.fpr());
658         break;
659     case Load32PayloadBoxInt:
660         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
661         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
662         break;
663     case Load32PayloadConvertToInt52:
664         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
665         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
666         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
667         break;
668     case Load32PayloadSignExtend:
669         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
670         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
671         break;
672 #else
673     case SetJSConstantTag:
674         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
675         break;
676     case SetJSConstantPayload:
677         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
678         break;
679     case SetInt32Tag:
680         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
681         break;
682     case SetCellTag:
683         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
684         break;
685     case SetBooleanTag:
686         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
687         break;
688     case SetDoubleConstant:
689         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
690         break;
691 #endif
692     case Load32Tag:
693         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
694         break;
695     case Load32Payload:
696         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
697         break;
698     case LoadPtr:
699         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
700         break;
701 #if USE(JSVALUE64)
702     case Load64:
703         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
704         break;
705     case Load64ShiftInt52Right:
706         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
707         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
708         break;
709     case Load64ShiftInt52Left:
710         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
711         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
712         break;
713 #endif
714     case LoadDouble:
715         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
716         break;
717     default:
718         RELEASE_ASSERT_NOT_REACHED();
719     }
720 }
721     
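// tempGPR holds the base cell's indexing type byte; returns a jump that is taken when it does not match
// the given shape under the array mode's array class.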
722 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
723 {
724     switch (arrayMode.arrayClass()) {
725     case Array::OriginalArray: {
726         CRASH();
727 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
728         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
729         return result;
730 #endif
731     }
732         
733     case Array::Array:
734         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
735         return m_jit.branch32(
736             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
737         
738     case Array::NonArray:
739     case Array::OriginalNonArray:
740         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
741         return m_jit.branch32(
742             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
743         
744     case Array::PossiblyArray:
745         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
746         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
747     }
748     
749     RELEASE_ASSERT_NOT_REACHED();
750     return JITCompiler::Jump();
751 }
752
753 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
754 {
755     JITCompiler::JumpList result;
756     
757     switch (arrayMode.type()) {
758     case Array::Int32:
759         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
760
761     case Array::Double:
762         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
763
764     case Array::Contiguous:
765         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
766
767     case Array::Undecided:
768         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
769
770     case Array::ArrayStorage:
771     case Array::SlowPutArrayStorage: {
772         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
773         
774         if (arrayMode.isJSArray()) {
775             if (arrayMode.isSlowPut()) {
776                 result.append(
777                     m_jit.branchTest32(
778                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
779                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
780                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
781                 result.append(
782                     m_jit.branch32(
783                         MacroAssembler::Above, tempGPR,
784                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
785                 break;
786             }
787             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
788             result.append(
789                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
790             break;
791         }
792         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
793         if (arrayMode.isSlowPut()) {
794             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
795             result.append(
796                 m_jit.branch32(
797                     MacroAssembler::Above, tempGPR,
798                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
799             break;
800         }
801         result.append(
802             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
803         break;
804     }
805     default:
806         CRASH();
807         break;
808     }
809     
810     return result;
811 }
812
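// Emits the check that the base cell conforms to the node's ArrayMode, OSR-exiting on mismatch. Arguments
// objects and typed arrays are checked by cell type rather than by indexing type.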
813 void SpeculativeJIT::checkArray(Node* node)
814 {
815     ASSERT(node->arrayMode().isSpecific());
816     ASSERT(!node->arrayMode().doesConversion());
817     
818     SpeculateCellOperand base(this, node->child1());
819     GPRReg baseReg = base.gpr();
820     
821     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
822         noResult(m_currentNode);
823         return;
824     }
825     
826     const ClassInfo* expectedClassInfo = 0;
827     
828     switch (node->arrayMode().type()) {
829     case Array::AnyTypedArray:
830     case Array::String:
831         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
832         break;
833     case Array::Int32:
834     case Array::Double:
835     case Array::Contiguous:
836     case Array::Undecided:
837     case Array::ArrayStorage:
838     case Array::SlowPutArrayStorage: {
839         GPRTemporary temp(this);
840         GPRReg tempGPR = temp.gpr();
841         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
842         speculationCheck(
843             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
844             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
845         
846         noResult(m_currentNode);
847         return;
848     }
849     case Array::DirectArguments:
850         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
851         noResult(m_currentNode);
852         return;
853     case Array::ScopedArguments:
854         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
855         noResult(m_currentNode);
856         return;
857     default:
858         speculateCellTypeWithoutTypeFiltering(
859             node->child1(), baseReg,
860             typeForTypedArrayType(node->arrayMode().typedArrayType()));
861         noResult(m_currentNode);
862         return;
863     }
864     
865     RELEASE_ASSERT(expectedClassInfo);
866     
867     GPRTemporary temp(this);
868     GPRTemporary temp2(this);
869     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
870     speculationCheck(
871         BadType, JSValueSource::unboxedCell(baseReg), node,
872         m_jit.branchPtr(
873             MacroAssembler::NotEqual,
874             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
875             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
876     
877     noResult(m_currentNode);
878 }
879
880 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
881 {
882     ASSERT(node->arrayMode().doesConversion());
883     
884     GPRTemporary temp(this);
885     GPRTemporary structure;
886     GPRReg tempGPR = temp.gpr();
887     GPRReg structureGPR = InvalidGPRReg;
888     
889     if (node->op() != ArrayifyToStructure) {
890         GPRTemporary realStructure(this);
891         structure.adopt(realStructure);
892         structureGPR = structure.gpr();
893     }
894         
895     // We can skip all that comes next if we already have array storage.
896     MacroAssembler::JumpList slowPath;
897     
898     if (node->op() == ArrayifyToStructure) {
899         slowPath.append(m_jit.branchWeakStructure(
900             JITCompiler::NotEqual,
901             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
902             node->structure()));
903     } else {
904         m_jit.load8(
905             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
906         
907         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
908     }
909     
910     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
911         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
912     
913     noResult(m_currentNode);
914 }
915
916 void SpeculativeJIT::arrayify(Node* node)
917 {
918     ASSERT(node->arrayMode().isSpecific());
919     
920     SpeculateCellOperand base(this, node->child1());
921     
922     if (!node->child2()) {
923         arrayify(node, base.gpr(), InvalidGPRReg);
924         return;
925     }
926     
927     SpeculateInt32Operand property(this, node->child2());
928     
929     arrayify(node, base.gpr(), property.gpr());
930 }
931
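// Ensures the storage pointer named by this edge is in a GPR, either reloading a spilled storage value or
// falling back to filling the edge as a cell.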
932 GPRReg SpeculativeJIT::fillStorage(Edge edge)
933 {
934     VirtualRegister virtualRegister = edge->virtualRegister();
935     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
936     
937     switch (info.registerFormat()) {
938     case DataFormatNone: {
939         if (info.spillFormat() == DataFormatStorage) {
940             GPRReg gpr = allocate();
941             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
942             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
943             info.fillStorage(*m_stream, gpr);
944             return gpr;
945         }
946         
947         // Must be a cell; fill it as a cell and then return the pointer.
948         return fillSpeculateCell(edge);
949     }
950         
951     case DataFormatStorage: {
952         GPRReg gpr = info.gpr();
953         m_gprs.lock(gpr);
954         return gpr;
955     }
956         
957     default:
958         return fillSpeculateCell(edge);
959     }
960 }
961
962 void SpeculativeJIT::useChildren(Node* node)
963 {
964     if (node->flags() & NodeHasVarArgs) {
965         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
966             if (!!m_jit.graph().m_varArgChildren[childIdx])
967                 use(m_jit.graph().m_varArgChildren[childIdx]);
968         }
969     } else {
970         Edge child1 = node->child1();
971         if (!child1) {
972             ASSERT(!node->child2() && !node->child3());
973             return;
974         }
975         use(child1);
976         
977         Edge child2 = node->child2();
978         if (!child2) {
979             ASSERT(!node->child3());
980             return;
981         }
982         use(child2);
983         
984         Edge child3 = node->child3();
985         if (!child3)
986             return;
987         use(child3);
988     }
989 }
990
991 void SpeculativeJIT::compileTryGetById(Node* node)
992 {
993     switch (node->child1().useKind()) {
994     case CellUse: {
995         SpeculateCellOperand base(this, node->child1());
996         JSValueRegsTemporary result(this, Reuse, base);
997
998         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
999         JSValueRegs resultRegs = result.regs();
1000
1001         base.use();
1002
1003         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);
1004
1005         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1006         break;
1007     }
1008
1009     case UntypedUse: {
1010         JSValueOperand base(this, node->child1());
1011         JSValueRegsTemporary result(this, Reuse, base);
1012
1013         JSValueRegs baseRegs = base.jsValueRegs();
1014         JSValueRegs resultRegs = result.regs();
1015
1016         base.use();
1017
1018         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1019
1020         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);
1021
1022         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1023         break;
1024     }
1025
1026     default:
1027         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1028         break;
1029     } 
1030 }
1031
1032 void SpeculativeJIT::compilePureGetById(Node* node)
1033 {
1034     ASSERT(node->op() == PureGetById);
1035
1036     switch (node->child1().useKind()) {
1037     case CellUse: {
1038         SpeculateCellOperand base(this, node->child1());
1039         JSValueRegsTemporary result(this, Reuse, base);
1040
1041         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
1042         JSValueRegs resultRegs = result.regs();
1043
1044         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::PureGet);
1045
1046         jsValueResult(resultRegs, node);
1047         break;
1048     }
1049     case UntypedUse: {
1050         JSValueOperand base(this, node->child1());
1051         JSValueRegsTemporary result(this, Reuse, base);
1052
1053         JSValueRegs baseRegs = base.jsValueRegs();
1054         JSValueRegs resultRegs = result.regs();
1055     
1056         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1057     
1058         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::PureGet);
1059     
1060         jsValueResult(resultRegs, node);
1061         break;
1062     }
1063     default:
1064         RELEASE_ASSERT_NOT_REACHED();
1065     }
1066 }
1067
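// Compiles the 'in' operator. When the property is a constant atomic string we plant a patchable inline
// cache (resolved via operationInOptimize); otherwise we call operationGenericIn.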
1068 void SpeculativeJIT::compileIn(Node* node)
1069 {
1070     SpeculateCellOperand base(this, node->child2());
1071     GPRReg baseGPR = base.gpr();
1072     
1073     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
1074         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1075             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1076             
1077             GPRTemporary result(this);
1078             GPRReg resultGPR = result.gpr();
1079
1080             use(node->child1());
1081             
1082             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1083             MacroAssembler::Label done = m_jit.label();
1084             
1085             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1086             // we can cast it to const AtomicStringImpl* safely.
1087             auto slowPath = slowPathCall(
1088                 jump.m_jump, this, operationInOptimize,
1089                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1090                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1091             
1092             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1093             stubInfo->codeOrigin = node->origin.semantic;
1094             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1095             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1096 #if USE(JSVALUE32_64)
1097             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1098             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1099 #endif
1100             stubInfo->patch.usedRegisters = usedRegisters();
1101
1102             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1103             addSlowPathGenerator(WTFMove(slowPath));
1104
1105             base.use();
1106
1107             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1108             return;
1109         }
1110     }
1111
1112     JSValueOperand key(this, node->child1());
1113     JSValueRegs regs = key.jsValueRegs();
1114         
1115     GPRFlushedCallResult result(this);
1116     GPRReg resultGPR = result.gpr();
1117         
1118     base.use();
1119     key.use();
1120         
1121     flushRegisters();
1122     callOperation(
1123         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1124         baseGPR, regs);
1125     m_jit.exceptionCheck();
1126     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1127 }
1128
1129 void SpeculativeJIT::compileDeleteById(Node* node)
1130 {
1131     JSValueOperand value(this, node->child1());
1132     GPRFlushedCallResult result(this);
1133
1134     JSValueRegs valueRegs = value.jsValueRegs();
1135     GPRReg resultGPR = result.gpr();
1136
1137     value.use();
1138
1139     flushRegisters();
1140     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1141     m_jit.exceptionCheck();
1142
1143     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1144 }
1145
1146 void SpeculativeJIT::compileDeleteByVal(Node* node)
1147 {
1148     JSValueOperand base(this, node->child1());
1149     JSValueOperand key(this, node->child2());
1150     GPRFlushedCallResult result(this);
1151
1152     JSValueRegs baseRegs = base.jsValueRegs();
1153     JSValueRegs keyRegs = key.jsValueRegs();
1154     GPRReg resultGPR = result.gpr();
1155
1156     base.use();
1157     key.use();
1158
1159     flushRegisters();
1160     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1161     m_jit.exceptionCheck();
1162
1163     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1164 }
1165
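// If this compare feeds directly into the block's terminal Branch, fuse the two into a single
// compare-and-branch (peephole) and return true; otherwise emit a boolean-producing compare and return false.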
1166 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1167 {
1168     unsigned branchIndexInBlock = detectPeepHoleBranch();
1169     if (branchIndexInBlock != UINT_MAX) {
1170         Node* branchNode = m_block->at(branchIndexInBlock);
1171
1172         ASSERT(node->adjustedRefCount() == 1);
1173         
1174         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1175     
1176         m_indexInBlock = branchIndexInBlock;
1177         m_currentNode = branchNode;
1178         
1179         return true;
1180     }
1181     
1182     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1183     
1184     return false;
1185 }
1186
1187 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1188 {
1189     unsigned branchIndexInBlock = detectPeepHoleBranch();
1190     if (branchIndexInBlock != UINT_MAX) {
1191         Node* branchNode = m_block->at(branchIndexInBlock);
1192
1193         ASSERT(node->adjustedRefCount() == 1);
1194         
1195         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1196     
1197         m_indexInBlock = branchIndexInBlock;
1198         m_currentNode = branchNode;
1199         
1200         return true;
1201     }
1202     
1203     nonSpeculativeNonPeepholeStrictEq(node, invert);
1204     
1205     return false;
1206 }
1207
1208 static const char* dataFormatString(DataFormat format)
1209 {
1210     // These values correspond to the DataFormat enum.
1211     const char* strings[] = {
1212         "[  ]",
1213         "[ i]",
1214         "[ d]",
1215         "[ c]",
1216         "Err!",
1217         "Err!",
1218         "Err!",
1219         "Err!",
1220         "[J ]",
1221         "[Ji]",
1222         "[Jd]",
1223         "[Jc]",
1224         "Err!",
1225         "Err!",
1226         "Err!",
1227         "Err!",
1228     };
1229     return strings[format];
1230 }
1231
1232 void SpeculativeJIT::dump(const char* label)
1233 {
1234     if (label)
1235         dataLogF("<%s>\n", label);
1236
1237     dataLogF("  gprs:\n");
1238     m_gprs.dump();
1239     dataLogF("  fprs:\n");
1240     m_fprs.dump();
1241     dataLogF("  VirtualRegisters:\n");
1242     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1243         GenerationInfo& info = m_generationInfo[i];
1244         if (info.alive())
1245             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1246         else
1247             dataLogF("    % 3d:[__][__]", i);
1248         if (info.registerFormat() == DataFormatDouble)
1249             dataLogF(":fpr%d\n", info.fpr());
1250         else if (info.registerFormat() != DataFormatNone
1251 #if USE(JSVALUE32_64)
1252             && !(info.registerFormat() & DataFormatJS)
1253 #endif
1254             ) {
1255             ASSERT(info.gpr() != InvalidGPRReg);
1256             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1257         } else
1258             dataLogF("\n");
1259     }
1260     if (label)
1261         dataLogF("</%s>\n", label);
1262 }
1263
1264 GPRTemporary::GPRTemporary()
1265     : m_jit(0)
1266     , m_gpr(InvalidGPRReg)
1267 {
1268 }
1269
1270 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1271     : m_jit(jit)
1272     , m_gpr(InvalidGPRReg)
1273 {
1274     m_gpr = m_jit->allocate();
1275 }
1276
1277 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1278     : m_jit(jit)
1279     , m_gpr(InvalidGPRReg)
1280 {
1281     m_gpr = m_jit->allocate(specific);
1282 }
1283
1284 #if USE(JSVALUE32_64)
1285 GPRTemporary::GPRTemporary(
1286     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1287     : m_jit(jit)
1288     , m_gpr(InvalidGPRReg)
1289 {
1290     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1291         m_gpr = m_jit->reuse(op1.gpr(which));
1292     else
1293         m_gpr = m_jit->allocate();
1294 }
1295 #endif // USE(JSVALUE32_64)
1296
1297 JSValueRegsTemporary::JSValueRegsTemporary() { }
1298
1299 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1300 #if USE(JSVALUE64)
1301     : m_gpr(jit)
1302 #else
1303     : m_payloadGPR(jit)
1304     , m_tagGPR(jit)
1305 #endif
1306 {
1307 }
1308
1309 #if USE(JSVALUE64)
1310 template<typename T>
1311 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1312     : m_gpr(jit, Reuse, operand)
1313 {
1314 }
1315 #else
1316 template<typename T>
1317 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1318 {
1319     if (resultWord == PayloadWord) {
1320         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1321         m_tagGPR = GPRTemporary(jit);
1322     } else {
1323         m_payloadGPR = GPRTemporary(jit);
1324         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1325     }
1326 }
1327 #endif
1328
1329 #if USE(JSVALUE64)
1330 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1331 {
1332     m_gpr = GPRTemporary(jit, Reuse, operand);
1333 }
1334 #else
1335 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1336 {
1337     if (jit->canReuse(operand.node())) {
1338         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1339         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1340     } else {
1341         m_payloadGPR = GPRTemporary(jit);
1342         m_tagGPR = GPRTemporary(jit);
1343     }
1344 }
1345 #endif
1346
1347 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1348
1349 JSValueRegs JSValueRegsTemporary::regs()
1350 {
1351 #if USE(JSVALUE64)
1352     return JSValueRegs(m_gpr.gpr());
1353 #else
1354     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1355 #endif
1356 }
1357
1358 void GPRTemporary::adopt(GPRTemporary& other)
1359 {
1360     ASSERT(!m_jit);
1361     ASSERT(m_gpr == InvalidGPRReg);
1362     ASSERT(other.m_jit);
1363     ASSERT(other.m_gpr != InvalidGPRReg);
1364     m_jit = other.m_jit;
1365     m_gpr = other.m_gpr;
1366     other.m_jit = 0;
1367     other.m_gpr = InvalidGPRReg;
1368 }
1369
1370 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1371 {
1372     ASSERT(other.m_jit);
1373     ASSERT(other.m_fpr != InvalidFPRReg);
1374     m_jit = other.m_jit;
1375     m_fpr = other.m_fpr;
1376
1377     other.m_jit = nullptr;
1378 }
1379
1380 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1381     : m_jit(jit)
1382     , m_fpr(InvalidFPRReg)
1383 {
1384     m_fpr = m_jit->fprAllocate();
1385 }
1386
1387 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1388     : m_jit(jit)
1389     , m_fpr(InvalidFPRReg)
1390 {
1391     if (m_jit->canReuse(op1.node()))
1392         m_fpr = m_jit->reuse(op1.fpr());
1393     else
1394         m_fpr = m_jit->fprAllocate();
1395 }
1396
1397 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1398     : m_jit(jit)
1399     , m_fpr(InvalidFPRReg)
1400 {
1401     if (m_jit->canReuse(op1.node()))
1402         m_fpr = m_jit->reuse(op1.fpr());
1403     else if (m_jit->canReuse(op2.node()))
1404         m_fpr = m_jit->reuse(op2.fpr());
1405     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1406         m_fpr = m_jit->reuse(op1.fpr());
1407     else
1408         m_fpr = m_jit->fprAllocate();
1409 }
1410
1411 #if USE(JSVALUE32_64)
1412 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1413     : m_jit(jit)
1414     , m_fpr(InvalidFPRReg)
1415 {
1416     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1417         m_fpr = m_jit->reuse(op1.fpr());
1418     else
1419         m_fpr = m_jit->fprAllocate();
1420 }
1421 #endif
1422
1423 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1424 {
1425     BasicBlock* taken = branchNode->branchData()->taken.block;
1426     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1427
1428     if (taken == nextBlock()) {
1429         condition = MacroAssembler::invert(condition);
1430         std::swap(taken, notTaken);
1431     }
1432
1433     SpeculateDoubleOperand op1(this, node->child1());
1434     SpeculateDoubleOperand op2(this, node->child2());
1435     
1436     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1437     jump(notTaken);
1438 }
1439
1440 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1441 {
1442     BasicBlock* taken = branchNode->branchData()->taken.block;
1443     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1444
1445     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1446     
1447     if (taken == nextBlock()) {
1448         condition = MacroAssembler::NotEqual;
1449         BasicBlock* tmp = taken;
1450         taken = notTaken;
1451         notTaken = tmp;
1452     }
1453
1454     SpeculateCellOperand op1(this, node->child1());
1455     SpeculateCellOperand op2(this, node->child2());
1456     
1457     GPRReg op1GPR = op1.gpr();
1458     GPRReg op2GPR = op2.gpr();
1459     
1460     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1461         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1462             speculationCheck(
1463                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1464         }
1465         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1466             speculationCheck(
1467                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1468         }
1469     } else {
1470         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1471             speculationCheck(
1472                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1473                 m_jit.branchIfNotObject(op1GPR));
1474         }
1475         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1476             m_jit.branchTest8(
1477                 MacroAssembler::NonZero, 
1478                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1479                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1480
1481         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1482             speculationCheck(
1483                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1484                 m_jit.branchIfNotObject(op2GPR));
1485         }
1486         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1487             m_jit.branchTest8(
1488                 MacroAssembler::NonZero, 
1489                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1490                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1491     }
1492
1493     branchPtr(condition, op1GPR, op2GPR, taken);
1494     jump(notTaken);
1495 }
1496
1497 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1498 {
1499     BasicBlock* taken = branchNode->branchData()->taken.block;
1500     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1501
1502     // The branch instruction will branch to the taken block.
1503     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1504     if (taken == nextBlock()) {
1505         condition = JITCompiler::invert(condition);
1506         BasicBlock* tmp = taken;
1507         taken = notTaken;
1508         notTaken = tmp;
1509     }
1510
1511     if (node->child1()->isInt32Constant()) {
1512         int32_t imm = node->child1()->asInt32();
1513         SpeculateBooleanOperand op2(this, node->child2());
1514         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1515     } else if (node->child2()->isInt32Constant()) {
1516         SpeculateBooleanOperand op1(this, node->child1());
1517         int32_t imm = node->child2()->asInt32();
1518         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1519     } else {
1520         SpeculateBooleanOperand op1(this, node->child1());
1521         SpeculateBooleanOperand op2(this, node->child2());
1522         branch32(condition, op1.gpr(), op2.gpr(), taken);
1523     }
1524
1525     jump(notTaken);
1526 }
1527
1528 void SpeculativeJIT::compileToLowerCase(Node* node)
1529 {
1530     ASSERT(node->op() == ToLowerCase);
1531     SpeculateCellOperand string(this, node->child1());
1532     GPRTemporary temp(this);
1533     GPRTemporary index(this);
1534     GPRTemporary charReg(this);
1535     GPRTemporary length(this);
1536
1537     GPRReg stringGPR = string.gpr();
1538     GPRReg tempGPR = temp.gpr();
1539     GPRReg indexGPR = index.gpr();
1540     GPRReg charGPR = charReg.gpr();
1541     GPRReg lengthGPR = length.gpr();
1542
1543     speculateString(node->child1(), stringGPR);
1544
1545     CCallHelpers::JumpList slowPath;
1546
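         // Fast path: walk a non-rope, 8-bit string. If every character is ASCII and none is upper
         // case, the string is already lower case and is returned as-is. Otherwise fall back to
         // operationToLowerCase, passing the index at which the scan stopped.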
1547     m_jit.move(TrustedImmPtr(0), indexGPR);
1548
1549     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1550     slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
1551
1552     slowPath.append(m_jit.branchTest32(
1553         MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1554         MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1555     m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1556     m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1557
1558     auto loopStart = m_jit.label();
1559     auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1560     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1561     slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1562     m_jit.sub32(TrustedImm32('A'), charGPR);
1563     slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1564
1565     m_jit.add32(TrustedImm32(1), indexGPR);
1566     m_jit.jump().linkTo(loopStart, &m_jit);
1567     
1568     slowPath.link(&m_jit);
1569     silentSpillAllRegisters(lengthGPR);
1570     callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1571     silentFillAllRegisters(lengthGPR);
1572     m_jit.exceptionCheck();
1573     auto done = m_jit.jump();
1574
1575     loopDone.link(&m_jit);
1576     m_jit.move(stringGPR, lengthGPR);
1577
1578     done.link(&m_jit);
1579     cellResult(lengthGPR, node);
1580 }
1581
1582 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1583 {
1584     BasicBlock* taken = branchNode->branchData()->taken.block;
1585     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1586
1587     // The branch instruction will branch to the taken block.
1588     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1589     if (taken == nextBlock()) {
1590         condition = JITCompiler::invert(condition);
1591         BasicBlock* tmp = taken;
1592         taken = notTaken;
1593         notTaken = tmp;
1594     }
1595
1596     if (node->child1()->isInt32Constant()) {
1597         int32_t imm = node->child1()->asInt32();
1598         SpeculateInt32Operand op2(this, node->child2());
1599         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1600     } else if (node->child2()->isInt32Constant()) {
1601         SpeculateInt32Operand op1(this, node->child1());
1602         int32_t imm = node->child2()->asInt32();
1603         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1604     } else {
1605         SpeculateInt32Operand op1(this, node->child1());
1606         SpeculateInt32Operand op2(this, node->child2());
1607         branch32(condition, op1.gpr(), op2.gpr(), taken);
1608     }
1609
1610     jump(notTaken);
1611 }
1612
1613 // Returns true if the compare is fused with a subsequent branch.
1614 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1615 {
1616     // Fused compare & branch.
1617     unsigned branchIndexInBlock = detectPeepHoleBranch();
1618     if (branchIndexInBlock != UINT_MAX) {
1619         Node* branchNode = m_block->at(branchIndexInBlock);
1620
1621         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1622         // so there can be no intervening nodes that also reference the compare.
1623         ASSERT(node->adjustedRefCount() == 1);
1624
1625         if (node->isBinaryUseKind(Int32Use))
1626             compilePeepHoleInt32Branch(node, branchNode, condition);
1627 #if USE(JSVALUE64)
1628         else if (node->isBinaryUseKind(Int52RepUse))
1629             compilePeepHoleInt52Branch(node, branchNode, condition);
1630 #endif // USE(JSVALUE64)
1631         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1632             // Use non-peephole comparison, for now.
1633             return false;
1634         } else if (node->isBinaryUseKind(DoubleRepUse))
1635             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1636         else if (node->op() == CompareEq) {
1637             if (node->isBinaryUseKind(BooleanUse))
1638                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1639             else if (node->isBinaryUseKind(SymbolUse))
1640                 compilePeepHoleSymbolEquality(node, branchNode);
1641             else if (node->isBinaryUseKind(ObjectUse))
1642                 compilePeepHoleObjectEquality(node, branchNode);
1643             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1644                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1645             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1646                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1647             else if (!needsTypeCheck(node->child1(), SpecOther))
1648                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1649             else if (!needsTypeCheck(node->child2(), SpecOther))
1650                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1651             else {
1652                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1653                 return true;
1654             }
1655         } else {
1656             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1657             return true;
1658         }
1659
1660         use(node->child1());
1661         use(node->child2());
1662         m_indexInBlock = branchIndexInBlock;
1663         m_currentNode = branchNode;
1664         return true;
1665     }
1666     return false;
1667 }
1668
1669 void SpeculativeJIT::noticeOSRBirth(Node* node)
1670 {
1671     if (!node->hasVirtualRegister())
1672         return;
1673     
1674     VirtualRegister virtualRegister = node->virtualRegister();
1675     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1676     
1677     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1678 }
1679
1680 void SpeculativeJIT::compileMovHint(Node* node)
1681 {
1682     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1683     
1684     Node* child = node->child1().node();
1685     noticeOSRBirth(child);
1686     
1687     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1688 }
1689
1690 void SpeculativeJIT::bail(AbortReason reason)
1691 {
1692     if (verboseCompilationEnabled())
1693         dataLog("Bailing compilation.\n");
1694     m_compileOkay = true;
1695     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1696     clearGenerationInfo();
1697 }
1698
1699 void SpeculativeJIT::compileCurrentBlock()
1700 {
1701     ASSERT(m_compileOkay);
1702     
1703     if (!m_block)
1704         return;
1705     
1706     ASSERT(m_block->isReachable);
1707     
1708     m_jit.blockHeads()[m_block->index] = m_jit.label();
1709
1710     if (!m_block->intersectionOfCFAHasVisited) {
1711         // Don't generate code for basic blocks that are unreachable according to CFA.
1712         // But to be sure that nobody has generated a jump to this block, drop in a
1713         // breakpoint here.
1714         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1715         return;
1716     }
1717
1718     m_stream->appendAndLog(VariableEvent::reset());
1719     
1720     m_jit.jitAssertHasValidCallFrame();
1721     m_jit.jitAssertTagsInPlace();
1722     m_jit.jitAssertArgumentCountSane();
1723
1724     m_state.reset();
1725     m_state.beginBasicBlock(m_block);
1726     
1727     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1728         int operand = m_block->variablesAtHead.operandForIndex(i);
1729         Node* node = m_block->variablesAtHead[i];
1730         if (!node)
1731             continue; // No need to record dead SetLocal's.
1732         
1733         VariableAccessData* variable = node->variableAccessData();
1734         DataFormat format;
1735         if (!node->refCount())
1736             continue; // No need to record dead SetLocal's.
1737         format = dataFormatFor(variable->flushFormat());
1738         m_stream->appendAndLog(
1739             VariableEvent::setLocal(
1740                 VirtualRegister(operand),
1741                 variable->machineLocal(),
1742                 format));
1743     }
1744
1745     m_origin = NodeOrigin();
1746     
1747     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1748         m_currentNode = m_block->at(m_indexInBlock);
1749         
1750         // We may have hit a contradiction that the CFA was aware of but that the JIT
1751         // didn't cause directly.
1752         if (!m_state.isValid()) {
1753             bail(DFGBailedAtTopOfBlock);
1754             return;
1755         }
1756
1757         m_interpreter.startExecuting();
1758         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1759         m_jit.setForNode(m_currentNode);
1760         m_origin = m_currentNode->origin;
1761         if (validationEnabled())
1762             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1763         m_lastGeneratedNode = m_currentNode->op();
1764         
1765         ASSERT(m_currentNode->shouldGenerate());
1766         
1767         if (verboseCompilationEnabled()) {
1768             dataLogF(
1769                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1770                 (int)m_currentNode->index(),
1771                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1772             dataLog("\n");
1773         }
1774
1775         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1776             m_jit.jitReleaseAssertNoException();
1777
1778         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1779
1780         compile(m_currentNode);
1781         
1782         if (belongsInMinifiedGraph(m_currentNode->op()))
1783             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1784         
1785 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1786         m_jit.clearRegisterAllocationOffsets();
1787 #endif
1788         
1789         if (!m_compileOkay) {
1790             bail(DFGBailedAtEndOfNode);
1791             return;
1792         }
1793         
1794         // Make sure that the abstract state is rematerialized for the next node.
1795         m_interpreter.executeEffects(m_indexInBlock);
1796     }
1797     
1798     // Perform the most basic verification that children have been used correctly.
1799     if (!ASSERT_DISABLED) {
1800         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1801             GenerationInfo& info = m_generationInfo[index];
1802             RELEASE_ASSERT(!info.alive());
1803         }
1804     }
1805 }
1806
1807 // If we are making type predictions about our arguments then
1808 // we need to check that they are correct on function entry.
1809 void SpeculativeJIT::checkArgumentTypes()
1810 {
1811     ASSERT(!m_currentNode);
1812     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1813
1814     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1815         Node* node = m_jit.graph().m_arguments[i];
1816         if (!node) {
1817             // The argument is dead. We don't do any checks for such arguments.
1818             continue;
1819         }
1820         
1821         ASSERT(node->op() == SetArgument);
1822         ASSERT(node->shouldGenerate());
1823
1824         VariableAccessData* variableAccessData = node->variableAccessData();
1825         FlushFormat format = variableAccessData->flushFormat();
1826         
1827         if (format == FlushedJSValue)
1828             continue;
1829         
1830         VirtualRegister virtualRegister = variableAccessData->local();
1831
1832         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1833         
1834 #if USE(JSVALUE64)
1835         switch (format) {
1836         case FlushedInt32: {
1837             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1838             break;
1839         }
1840         case FlushedBoolean: {
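                 // Booleans are encoded as ValueFalse or ValueTrue, which differ only in the low bit,
                 // so xor'ing with ValueFalse must leave either 0 or 1.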
1841             GPRTemporary temp(this);
1842             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1843             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1844             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1845             break;
1846         }
1847         case FlushedCell: {
1848             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1849             break;
1850         }
1851         default:
1852             RELEASE_ASSERT_NOT_REACHED();
1853             break;
1854         }
1855 #else
1856         switch (format) {
1857         case FlushedInt32: {
1858             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1859             break;
1860         }
1861         case FlushedBoolean: {
1862             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1863             break;
1864         }
1865         case FlushedCell: {
1866             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1867             break;
1868         }
1869         default:
1870             RELEASE_ASSERT_NOT_REACHED();
1871             break;
1872         }
1873 #endif
1874     }
1875
1876     m_origin = NodeOrigin();
1877 }
1878
1879 bool SpeculativeJIT::compile()
1880 {
1881     checkArgumentTypes();
1882     
1883     ASSERT(!m_currentNode);
1884     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1885         m_jit.setForBlockIndex(blockIndex);
1886         m_block = m_jit.graph().block(blockIndex);
1887         compileCurrentBlock();
1888     }
1889     linkBranches();
1890     return true;
1891 }
1892
1893 void SpeculativeJIT::createOSREntries()
1894 {
1895     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1896         BasicBlock* block = m_jit.graph().block(blockIndex);
1897         if (!block)
1898             continue;
1899         if (!block->isOSRTarget)
1900             continue;
1901         
1902         // Currently we don't have OSR entry trampolines. We could add them
1903         // here if need be.
1904         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1905     }
1906 }
1907
1908 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1909 {
1910     unsigned osrEntryIndex = 0;
1911     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1912         BasicBlock* block = m_jit.graph().block(blockIndex);
1913         if (!block)
1914             continue;
1915         if (!block->isOSRTarget)
1916             continue;
1917         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1918     }
1919     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1920     
1921     if (verboseCompilationEnabled()) {
1922         DumpContext dumpContext;
1923         dataLog("OSR Entries:\n");
1924         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1925             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1926         if (!dumpContext.isEmpty())
1927             dumpContext.dump(WTF::dataFile());
1928     }
1929 }
1930
1931 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1932 {
1933     Edge child3 = m_jit.graph().varArgChild(node, 2);
1934     Edge child4 = m_jit.graph().varArgChild(node, 3);
1935
1936     ArrayMode arrayMode = node->arrayMode();
1937     
1938     GPRReg baseReg = base.gpr();
1939     GPRReg propertyReg = property.gpr();
1940     
1941     SpeculateDoubleOperand value(this, child3);
1942
1943     FPRReg valueReg = value.fpr();
1944     
1945     DFG_TYPE_CHECK(
1946         JSValueRegs(), child3, SpecFullRealNumber,
1947         m_jit.branchDouble(
1948             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1949     
1950     if (!m_compileOkay)
1951         return;
1952     
1953     StorageOperand storage(this, child4);
1954     GPRReg storageReg = storage.gpr();
1955
1956     if (node->op() == PutByValAlias) {
1957         // Store the value to the array.
1958         GPRReg propertyReg = property.gpr();
1959         FPRReg valueReg = value.fpr();
1960         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1961         
1962         noResult(m_currentNode);
1963         return;
1964     }
1965     
1966     GPRTemporary temporary;
1967     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1968
1969     MacroAssembler::Jump slowCase;
1970     
1971     if (arrayMode.isInBounds()) {
1972         speculationCheck(
1973             OutOfBounds, JSValueRegs(), 0,
1974             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1975     } else {
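             // The store may be past the public length. If the index is still within the vector, bump
             // the public length; past the vector, either fail speculation or (for out-of-bounds array
             // modes) defer to the slow path call added below.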
1976         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1977         
1978         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1979         
1980         if (!arrayMode.isOutOfBounds())
1981             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1982         
1983         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1984         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1985         
1986         inBounds.link(&m_jit);
1987     }
1988     
1989     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1990
1991     base.use();
1992     property.use();
1993     value.use();
1994     storage.use();
1995     
1996     if (arrayMode.isOutOfBounds()) {
1997         addSlowPathGenerator(
1998             slowPathCall(
1999                 slowCase, this,
2000                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
2001                 NoResult, baseReg, propertyReg, valueReg));
2002     }
2003
2004     noResult(m_currentNode, UseChildrenCalledExplicitly);
2005 }
2006
2007 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
2008 {
2009     SpeculateCellOperand string(this, node->child1());
2010     SpeculateStrictInt32Operand index(this, node->child2());
2011     StorageOperand storage(this, node->child3());
2012
2013     GPRReg stringReg = string.gpr();
2014     GPRReg indexReg = index.gpr();
2015     GPRReg storageReg = storage.gpr();
2016     
2017     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
2018
2019     // unsigned comparison so we can filter out negative indices and indices that are too large
2020     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2021
2022     GPRTemporary scratch(this);
2023     GPRReg scratchReg = scratch.gpr();
2024
2025     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2026
2027     // Load the character into scratchReg
2028     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2029
2030     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2031     JITCompiler::Jump cont8Bit = m_jit.jump();
2032
2033     is16Bit.link(&m_jit);
2034
2035     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2036
2037     cont8Bit.link(&m_jit);
2038
2039     int32Result(scratchReg, m_currentNode);
2040 }
2041
2042 void SpeculativeJIT::compileGetByValOnString(Node* node)
2043 {
2044     SpeculateCellOperand base(this, node->child1());
2045     SpeculateStrictInt32Operand property(this, node->child2());
2046     StorageOperand storage(this, node->child3());
2047     GPRReg baseReg = base.gpr();
2048     GPRReg propertyReg = property.gpr();
2049     GPRReg storageReg = storage.gpr();
2050
2051     GPRTemporary scratch(this);
2052     GPRReg scratchReg = scratch.gpr();
2053 #if USE(JSVALUE32_64)
2054     GPRTemporary resultTag;
2055     GPRReg resultTagReg = InvalidGPRReg;
2056     if (node->arrayMode().isOutOfBounds()) {
2057         GPRTemporary realResultTag(this);
2058         resultTag.adopt(realResultTag);
2059         resultTagReg = resultTag.gpr();
2060     }
2061 #endif
2062
2063     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2064
2065     // unsigned comparison so we can filter out negative indices and indices that are too large
2066     JITCompiler::Jump outOfBounds = m_jit.branch32(
2067         MacroAssembler::AboveOrEqual, propertyReg,
2068         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2069     if (node->arrayMode().isInBounds())
2070         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2071
2072     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2073
2074     // Load the character into scratchReg
2075     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2076
2077     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2078     JITCompiler::Jump cont8Bit = m_jit.jump();
2079
2080     is16Bit.link(&m_jit);
2081
2082     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2083
2084     JITCompiler::Jump bigCharacter =
2085         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2086
2087     // 8 bit string values don't need the isASCII check.
2088     cont8Bit.link(&m_jit);
2089
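         // On this path the character code is below 0x100, so index the VM's single-character string
         // cache: scale by pointer size and load the cached JSString*.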
2090     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2091     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2092     m_jit.loadPtr(scratchReg, scratchReg);
2093
2094     addSlowPathGenerator(
2095         slowPathCall(
2096             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2097
2098     if (node->arrayMode().isOutOfBounds()) {
2099 #if USE(JSVALUE32_64)
2100         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2101 #endif
2102
2103         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2104         bool prototypeChainIsSane = false;
2105         if (globalObject->stringPrototypeChainIsSane()) {
2106             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2107             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2108             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2109             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2110             // indexed properties either.
2111             // https://bugs.webkit.org/show_bug.cgi?id=144668
2112             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2113             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2114             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2115         }
2116         if (prototypeChainIsSane) {
2117             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2118             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2119             
2120 #if USE(JSVALUE64)
2121             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2122                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2123 #else
2124             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2125                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2126                 baseReg, propertyReg));
2127 #endif
2128         } else {
2129 #if USE(JSVALUE64)
2130             addSlowPathGenerator(
2131                 slowPathCall(
2132                     outOfBounds, this, operationGetByValStringInt,
2133                     scratchReg, baseReg, propertyReg));
2134 #else
2135             addSlowPathGenerator(
2136                 slowPathCall(
2137                     outOfBounds, this, operationGetByValStringInt,
2138                     JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2139 #endif
2140         }
2141         
2142 #if USE(JSVALUE64)
2143         jsValueResult(scratchReg, m_currentNode);
2144 #else
2145         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2146 #endif
2147     } else
2148         cellResult(scratchReg, m_currentNode);
2149 }
2150
2151 void SpeculativeJIT::compileFromCharCode(Node* node)
2152 {
2153     Edge& child = node->child1();
2154     if (child.useKind() == UntypedUse) {
2155         JSValueOperand opr(this, child);
2156         JSValueRegs oprRegs = opr.jsValueRegs();
2157 #if USE(JSVALUE64)
2158         GPRTemporary result(this);
2159         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2160 #else
2161         GPRTemporary resultTag(this);
2162         GPRTemporary resultPayload(this);
2163         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2164 #endif
2165         flushRegisters();
2166         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2167         m_jit.exceptionCheck();
2168         
2169         jsValueResult(resultRegs, node);
2170         return;
2171     }
2172
2173     SpeculateStrictInt32Operand property(this, child);
2174     GPRReg propertyReg = property.gpr();
2175     GPRTemporary smallStrings(this);
2176     GPRTemporary scratch(this);
2177     GPRReg scratchReg = scratch.gpr();
2178     GPRReg smallStringsReg = smallStrings.gpr();
2179
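         // Fast path: char codes below 0xff are served from the single-character string cache; larger
         // codes, or an empty cache slot, fall back to operationStringFromCharCode.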
2180     JITCompiler::JumpList slowCases;
2181     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2182     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2183     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2184
2185     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2186     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2187     cellResult(scratchReg, m_currentNode);
2188 }
2189
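     // Inspect the operand's current register format to decide whether ToInt32 can consume it as an
     // integer directly, must go through the generic JSValue path, or has a format (boolean or cell)
     // that terminates speculative execution.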
2190 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2191 {
2192     VirtualRegister virtualRegister = node->virtualRegister();
2193     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2194
2195     switch (info.registerFormat()) {
2196     case DataFormatStorage:
2197         RELEASE_ASSERT_NOT_REACHED();
2198
2199     case DataFormatBoolean:
2200     case DataFormatCell:
2201         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2202         return GeneratedOperandTypeUnknown;
2203
2204     case DataFormatNone:
2205     case DataFormatJSCell:
2206     case DataFormatJS:
2207     case DataFormatJSBoolean:
2208     case DataFormatJSDouble:
2209         return GeneratedOperandJSValue;
2210
2211     case DataFormatJSInt32:
2212     case DataFormatInt32:
2213         return GeneratedOperandInteger;
2214
2215     default:
2216         RELEASE_ASSERT_NOT_REACHED();
2217         return GeneratedOperandTypeUnknown;
2218     }
2219 }
2220
2221 void SpeculativeJIT::compileValueToInt32(Node* node)
2222 {
2223     switch (node->child1().useKind()) {
2224 #if USE(JSVALUE64)
2225     case Int52RepUse: {
2226         SpeculateStrictInt52Operand op1(this, node->child1());
2227         GPRTemporary result(this, Reuse, op1);
2228         GPRReg op1GPR = op1.gpr();
2229         GPRReg resultGPR = result.gpr();
2230         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2231         int32Result(resultGPR, node, DataFormatInt32);
2232         return;
2233     }
2234 #endif // USE(JSVALUE64)
2235         
2236     case DoubleRepUse: {
2237         GPRTemporary result(this);
2238         SpeculateDoubleOperand op1(this, node->child1());
2239         FPRReg fpr = op1.fpr();
2240         GPRReg gpr = result.gpr();
2241         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2242         
2243         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2244         
2245         int32Result(gpr, node);
2246         return;
2247     }
2248     
2249     case NumberUse:
2250     case NotCellUse: {
2251         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2252         case GeneratedOperandInteger: {
2253             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2254             GPRTemporary result(this, Reuse, op1);
2255             m_jit.move(op1.gpr(), result.gpr());
2256             int32Result(result.gpr(), node, op1.format());
2257             return;
2258         }
2259         case GeneratedOperandJSValue: {
2260             GPRTemporary result(this);
2261 #if USE(JSVALUE64)
2262             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2263
2264             GPRReg gpr = op1.gpr();
2265             GPRReg resultGpr = result.gpr();
2266             FPRTemporary tempFpr(this);
2267             FPRReg fpr = tempFpr.fpr();
2268
2269             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2270             JITCompiler::JumpList converted;
2271
2272             if (node->child1().useKind() == NumberUse) {
2273                 DFG_TYPE_CHECK(
2274                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2275                     m_jit.branchTest64(
2276                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2277             } else {
2278                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2279                 
2280                 DFG_TYPE_CHECK(
2281                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2282                 
2283                 // It's not a cell: so true turns into 1 and all else turns into 0.
2284                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2285                 converted.append(m_jit.jump());
2286                 
2287                 isNumber.link(&m_jit);
2288             }
2289
2290             // First, if we get here we have a double encoded as a JSValue
2291             unboxDouble(gpr, resultGpr, fpr);
2292
2293             silentSpillAllRegisters(resultGpr);
2294             callOperation(operationToInt32, resultGpr, fpr);
2295             silentFillAllRegisters(resultGpr);
2296
2297             converted.append(m_jit.jump());
2298
2299             isInteger.link(&m_jit);
2300             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2301
2302             converted.link(&m_jit);
2303 #else
2304             Node* childNode = node->child1().node();
2305             VirtualRegister virtualRegister = childNode->virtualRegister();
2306             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2307
2308             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2309
2310             GPRReg payloadGPR = op1.payloadGPR();
2311             GPRReg resultGpr = result.gpr();
2312         
2313             JITCompiler::JumpList converted;
2314
2315             if (info.registerFormat() == DataFormatJSInt32)
2316                 m_jit.move(payloadGPR, resultGpr);
2317             else {
2318                 GPRReg tagGPR = op1.tagGPR();
2319                 FPRTemporary tempFpr(this);
2320                 FPRReg fpr = tempFpr.fpr();
2321                 FPRTemporary scratch(this);
2322
2323                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2324
2325                 if (node->child1().useKind() == NumberUse) {
2326                     DFG_TYPE_CHECK(
2327                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2328                         m_jit.branch32(
2329                             MacroAssembler::AboveOrEqual, tagGPR,
2330                             TrustedImm32(JSValue::LowestTag)));
2331                 } else {
2332                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2333                     
2334                     DFG_TYPE_CHECK(
2335                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2336                         m_jit.branchIfCell(op1.jsValueRegs()));
2337                     
2338                     // It's not a cell: so true turns into 1 and all else turns into 0.
2339                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2340                     m_jit.move(TrustedImm32(0), resultGpr);
2341                     converted.append(m_jit.jump());
2342                     
2343                     isBoolean.link(&m_jit);
2344                     m_jit.move(payloadGPR, resultGpr);
2345                     converted.append(m_jit.jump());
2346                     
2347                     isNumber.link(&m_jit);
2348                 }
2349
2350                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2351
2352                 silentSpillAllRegisters(resultGpr);
2353                 callOperation(operationToInt32, resultGpr, fpr);
2354                 silentFillAllRegisters(resultGpr);
2355
2356                 converted.append(m_jit.jump());
2357
2358                 isInteger.link(&m_jit);
2359                 m_jit.move(payloadGPR, resultGpr);
2360
2361                 converted.link(&m_jit);
2362             }
2363 #endif
2364             int32Result(resultGpr, node);
2365             return;
2366         }
2367         case GeneratedOperandTypeUnknown:
2368             RELEASE_ASSERT(!m_compileOkay);
2369             return;
2370         }
2371         RELEASE_ASSERT_NOT_REACHED();
2372         return;
2373     }
2374     
2375     default:
2376         ASSERT(!m_compileOkay);
2377         return;
2378     }
2379 }
2380
2381 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2382 {
2383     if (doesOverflow(node->arithMode())) {
2384         if (enableInt52()) {
2385             SpeculateInt32Operand op1(this, node->child1());
2386             GPRTemporary result(this, Reuse, op1);
2387             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2388             strictInt52Result(result.gpr(), node);
2389             return;
2390         }
2391         SpeculateInt32Operand op1(this, node->child1());
2392         FPRTemporary result(this);
2393             
2394         GPRReg inputGPR = op1.gpr();
2395         FPRReg outputFPR = result.fpr();
2396             
2397         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2398             
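             // convertInt32ToDouble treated the input as signed, so if the uint32 has its top bit set,
             // add 2^32 to recover the unsigned value.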
2399         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2400         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2401         positive.link(&m_jit);
2402             
2403         doubleResult(outputFPR, node);
2404         return;
2405     }
2406     
2407     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2408
2409     SpeculateInt32Operand op1(this, node->child1());
2410     GPRTemporary result(this);
2411
2412     m_jit.move(op1.gpr(), result.gpr());
2413
2414     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2415
2416     int32Result(result.gpr(), node, op1.format());
2417 }
2418
2419 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2420 {
2421     SpeculateDoubleOperand op1(this, node->child1());
2422     FPRTemporary scratch(this);
2423     GPRTemporary result(this);
2424     
2425     FPRReg valueFPR = op1.fpr();
2426     FPRReg scratchFPR = scratch.fpr();
2427     GPRReg resultGPR = result.gpr();
2428
2429     JITCompiler::JumpList failureCases;
2430     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2431     m_jit.branchConvertDoubleToInt32(
2432         valueFPR, resultGPR, failureCases, scratchFPR,
2433         shouldCheckNegativeZero(node->arithMode()));
2434     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2435
2436     int32Result(resultGPR, node);
2437 }
2438
2439 void SpeculativeJIT::compileDoubleRep(Node* node)
2440 {
2441     switch (node->child1().useKind()) {
2442     case RealNumberUse: {
2443         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2444         FPRTemporary result(this);
2445         
2446         JSValueRegs op1Regs = op1.jsValueRegs();
2447         FPRReg resultFPR = result.fpr();
2448         
2449 #if USE(JSVALUE64)
2450         GPRTemporary temp(this);
2451         GPRReg tempGPR = temp.gpr();
2452         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2453 #else
2454         FPRTemporary temp(this);
2455         FPRReg tempFPR = temp.fpr();
2456         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2457 #endif
2458         
2459         JITCompiler::Jump done = m_jit.branchDouble(
2460             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2461         
2462         DFG_TYPE_CHECK(
2463             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2464         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2465         
2466         done.link(&m_jit);
2467         
2468         doubleResult(resultFPR, node);
2469         return;
2470     }
2471     
2472     case NotCellUse:
2473     case NumberUse: {
2474         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2475
2476         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2477         if (isInt32Speculation(possibleTypes)) {
2478             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2479             FPRTemporary result(this);
2480             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2481             doubleResult(result.fpr(), node);
2482             return;
2483         }
2484
2485         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2486         FPRTemporary result(this);
2487
2488 #if USE(JSVALUE64)
2489         GPRTemporary temp(this);
2490
2491         GPRReg op1GPR = op1.gpr();
2492         GPRReg tempGPR = temp.gpr();
2493         FPRReg resultFPR = result.fpr();
2494         JITCompiler::JumpList done;
2495
2496         JITCompiler::Jump isInteger = m_jit.branch64(
2497             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2498
2499         if (node->child1().useKind() == NotCellUse) {
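                 // ToNumber for non-cells: null and false become 0, true becomes 1, and undefined
                 // becomes NaN.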
2500             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2501             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2502
2503             static const double zero = 0;
2504             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2505
2506             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2507             done.append(isNull);
2508
2509             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2510                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2511
2512             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2513             static const double one = 1;
2514             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2515             done.append(m_jit.jump());
2516             done.append(isFalse);
2517
2518             isUndefined.link(&m_jit);
2519             static const double NaN = PNaN;
2520             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2521             done.append(m_jit.jump());
2522
2523             isNumber.link(&m_jit);
2524         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2525             typeCheck(
2526                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2527                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2528         }
2529
2530         unboxDouble(op1GPR, tempGPR, resultFPR);
2531         done.append(m_jit.jump());
2532     
2533         isInteger.link(&m_jit);
2534         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2535         done.link(&m_jit);
2536 #else // USE(JSVALUE64) -> this is the 32_64 case
2537         FPRTemporary temp(this);
2538     
2539         GPRReg op1TagGPR = op1.tagGPR();
2540         GPRReg op1PayloadGPR = op1.payloadGPR();
2541         FPRReg tempFPR = temp.fpr();
2542         FPRReg resultFPR = result.fpr();
2543         JITCompiler::JumpList done;
2544     
2545         JITCompiler::Jump isInteger = m_jit.branch32(
2546             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2547
2548         if (node->child1().useKind() == NotCellUse) {
2549             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2550             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2551
2552             static const double zero = 0;
2553             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2554
2555             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2556             done.append(isNull);
2557
2558             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2559
2560             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2561             static const double one = 1;
2562             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2563             done.append(m_jit.jump());
2564             done.append(isFalse);
2565
2566             isUndefined.link(&m_jit);
2567             static const double NaN = PNaN;
2568             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2569             done.append(m_jit.jump());
2570
2571             isNumber.link(&m_jit);
2572         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2573             typeCheck(
2574                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2575                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2576         }
2577
2578         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2579         done.append(m_jit.jump());
2580     
2581         isInteger.link(&m_jit);
2582         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2583         done.link(&m_jit);
2584 #endif // USE(JSVALUE64)
2585     
2586         doubleResult(resultFPR, node);
2587         return;
2588     }
2589         
2590 #if USE(JSVALUE64)
2591     case Int52RepUse: {
2592         SpeculateStrictInt52Operand value(this, node->child1());
2593         FPRTemporary result(this);
2594         
2595         GPRReg valueGPR = value.gpr();
2596         FPRReg resultFPR = result.fpr();
2597
2598         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2599         
2600         doubleResult(resultFPR, node);
2601         return;
2602     }
2603 #endif // USE(JSVALUE64)
2604         
2605     default:
2606         RELEASE_ASSERT_NOT_REACHED();
2607         return;
2608     }
2609 }
2610
2611 void SpeculativeJIT::compileValueRep(Node* node)
2612 {
2613     switch (node->child1().useKind()) {
2614     case DoubleRepUse: {
2615         SpeculateDoubleOperand value(this, node->child1());
2616         JSValueRegsTemporary result(this);
2617         
2618         FPRReg valueFPR = value.fpr();
2619         JSValueRegs resultRegs = result.regs();
2620         
2621         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2622         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2623         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2624         // local was purified.
2625         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2626             m_jit.purifyNaN(valueFPR);
2627
2628         boxDouble(valueFPR, resultRegs);
2629         
2630         jsValueResult(resultRegs, node);
2631         return;
2632     }
2633         
2634 #if USE(JSVALUE64)
2635     case Int52RepUse: {
2636         SpeculateStrictInt52Operand value(this, node->child1());
2637         GPRTemporary result(this);
2638         
2639         GPRReg valueGPR = value.gpr();
2640         GPRReg resultGPR = result.gpr();
2641         
2642         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2643         
2644         jsValueResult(resultGPR, node);
2645         return;
2646     }
2647 #endif // USE(JSVALUE64)
2648         
2649     default:
2650         RELEASE_ASSERT_NOT_REACHED();
2651         return;
2652     }
2653 }
2654
2655 static double clampDoubleToByte(double d)
2656 {
2657     d += 0.5;
2658     if (!(d > 0))
2659         d = 0;
2660     else if (d > 255)
2661         d = 255;
2662     return d;
2663 }
2664
2665 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2666 {
2667     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2668     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2669     jit.xorPtr(result, result);
2670     MacroAssembler::Jump clamped = jit.jump();
2671     tooBig.link(&jit);
2672     jit.move(JITCompiler::TrustedImm32(255), result);
2673     clamped.link(&jit);
2674     inBounds.link(&jit);
2675 }
2676
2677 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2678 {
2679     // Unordered compare so we pick up NaN
2680     static const double zero = 0;
2681     static const double byteMax = 255;
2682     static const double half = 0.5;
2683     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2684     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2685     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2686     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2687     
2688     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2689     // FIXME: This should probably just use a floating point round!
2690     // https://bugs.webkit.org/show_bug.cgi?id=72054
2691     jit.addDouble(source, scratch);
2692     jit.truncateDoubleToInt32(scratch, result);   
2693     MacroAssembler::Jump truncatedInt = jit.jump();
2694     
2695     tooSmall.link(&jit);
2696     jit.xorPtr(result, result);
2697     MacroAssembler::Jump zeroed = jit.jump();
2698     
2699     tooBig.link(&jit);
2700     jit.move(JITCompiler::TrustedImm32(255), result);
2701     
2702     truncatedInt.link(&jit);
2703     zeroed.link(&jit);
2704
2705 }
2706
2707 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2708 {
2709     if (node->op() == PutByValAlias)
2710         return JITCompiler::Jump();
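         // If the view folds to a compile-time constant, check against its known length (or skip the
         // check entirely when the index is a constant that is already in bounds); otherwise compare
         // against the length loaded from the view itself.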
2711     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2712         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2713     if (view) {
2714         uint32_t length = view->length();
2715         Node* indexNode = m_jit.graph().child(node, 1).node();
2716         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2717             return JITCompiler::Jump();
2718         return m_jit.branch32(
2719             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2720     }
2721     return m_jit.branch32(
2722         MacroAssembler::AboveOrEqual, indexGPR,
2723         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2724 }
2725
2726 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2727 {
2728     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2729     if (!jump.isSet())
2730         return;
2731     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2732 }
2733
2734 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2735 {
2736     JITCompiler::Jump done;
2737     if (outOfBounds.isSet()) {
2738         done = m_jit.jump();
2739         if (node->arrayMode().isInBounds())
2740             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2741         else {
2742             outOfBounds.link(&m_jit);
2743
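                 // Out-of-bounds accesses are tolerated here, but a wasteful typed array whose vector
                 // has become null (i.e. the buffer was neutered) must still fail speculation.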
2744             JITCompiler::Jump notWasteful = m_jit.branch32(
2745                 MacroAssembler::NotEqual,
2746                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2747                 TrustedImm32(WastefulTypedArray));
2748
2749             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2750                 MacroAssembler::Zero,
2751                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2752             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2753             notWasteful.link(&m_jit);
2754         }
2755     }
2756     return done;
2757 }
2758
2759 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2760 {
2761     ASSERT(isInt(type));
2762     
2763     SpeculateCellOperand base(this, node->child1());
2764     SpeculateStrictInt32Operand property(this, node->child2());
2765     StorageOperand storage(this, node->child3());
2766
2767     GPRReg baseReg = base.gpr();
2768     GPRReg propertyReg = property.gpr();
2769     GPRReg storageReg = storage.gpr();
2770
2771     GPRTemporary result(this);
2772     GPRReg resultReg = result.gpr();
2773
2774     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2775
2776     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2777     switch (elementSize(type)) {
2778     case 1:
2779         if (isSigned(type))
2780             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2781         else
2782             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2783         break;
2784     case 2:
2785         if (isSigned(type))
2786             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2787         else
2788             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2789         break;
2790     case 4:
2791         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2792         break;
2793     default:
2794         CRASH();
2795     }
2796     if (elementSize(type) < 4 || isSigned(type)) {
2797         int32Result(resultReg, node);
2798         return;
2799     }
2800     
2801     ASSERT(elementSize(type) == 4 && !isSigned(type));
2802     if (node->shouldSpeculateInt32()) {
2803         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2804         int32Result(resultReg, node);
2805         return;
2806     }
2807     
2808 #if USE(JSVALUE64)
2809     if (node->shouldSpeculateAnyInt()) {
2810         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2811         strictInt52Result(resultReg, node);
2812         return;
2813     }
2814 #endif
2815     
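    // Uint32 -> double without 64-bit help: convertInt32ToDouble reads the bits as signed,
    // so when the sign bit is set the result is exactly 2^32 too small. Conceptually:
    //     double d = (double)(int32_t)bits;
    //     if ((int32_t)bits < 0)
    //         d += 4294967296.0; // 2^32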
2816     FPRTemporary fresult(this);
2817     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2818     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2819     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2820     positive.link(&m_jit);
2821     doubleResult(fresult.fpr(), node);
2822 }
2823
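// Stores a value into an integer typed array. The value operand may arrive as a constant,
// an Int32, an Int52 (64-bit only), or a double; each case is normalized to a 32-bit
// pattern in a GPR, clamping to [0, 255] first for Uint8Clamped arrays. Doubles that
// cannot be truncated to an in-range int32 are boxed and handed to the generic PutByVal
// operation on the slow path.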
2824 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2825 {
2826     ASSERT(isInt(type));
2827     
2828     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2829     GPRReg storageReg = storage.gpr();
2830     
2831     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2832     
2833     GPRTemporary value;
2834 #if USE(JSVALUE32_64)
2835     GPRTemporary propertyTag;
2836     GPRTemporary valueTag;
2837 #endif
2838
2839     GPRReg valueGPR = InvalidGPRReg;
2840 #if USE(JSVALUE32_64)
2841     GPRReg propertyTagGPR = InvalidGPRReg;
2842     GPRReg valueTagGPR = InvalidGPRReg;
2843 #endif
2844
2845     JITCompiler::JumpList slowPathCases;
2846
2847     if (valueUse->isConstant()) {
2848         JSValue jsValue = valueUse->asJSValue();
2849         if (!jsValue.isNumber()) {
2850             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2851             noResult(node);
2852             return;
2853         }
2854         double d = jsValue.asNumber();
2855         if (isClamped(type)) {
2856             ASSERT(elementSize(type) == 1);
2857             d = clampDoubleToByte(d);
2858         }
2859         GPRTemporary scratch(this);
2860         GPRReg scratchReg = scratch.gpr();
2861         m_jit.move(Imm32(toInt32(d)), scratchReg);
2862         value.adopt(scratch);
2863         valueGPR = scratchReg;
2864     } else {
2865         switch (valueUse.useKind()) {
2866         case Int32Use: {
2867             SpeculateInt32Operand valueOp(this, valueUse);
2868             GPRTemporary scratch(this);
2869             GPRReg scratchReg = scratch.gpr();
2870             m_jit.move(valueOp.gpr(), scratchReg);
2871             if (isClamped(type)) {
2872                 ASSERT(elementSize(type) == 1);
2873                 compileClampIntegerToByte(m_jit, scratchReg);
2874             }
2875             value.adopt(scratch);
2876             valueGPR = scratchReg;
2877             break;
2878         }
2879             
2880 #if USE(JSVALUE64)
2881         case Int52RepUse: {
2882             SpeculateStrictInt52Operand valueOp(this, valueUse);
2883             GPRTemporary scratch(this);
2884             GPRReg scratchReg = scratch.gpr();
2885             m_jit.move(valueOp.gpr(), scratchReg);
2886             if (isClamped(type)) {
2887                 ASSERT(elementSize(type) == 1);
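                // Clamp the Int52 value to [0, 255] inline, roughly:
                //     if ((uint64_t)v > 0xff) v = (v > 0xff) ? 255 : 0;
                // i.e. values above 255 become 255 and negative values become 0.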
2888                 MacroAssembler::Jump inBounds = m_jit.branch64(
2889                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2890                 MacroAssembler::Jump tooBig = m_jit.branch64(
2891                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2892                 m_jit.move(TrustedImm32(0), scratchReg);
2893                 MacroAssembler::Jump clamped = m_jit.jump();
2894                 tooBig.link(&m_jit);
2895                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2896                 clamped.link(&m_jit);
2897                 inBounds.link(&m_jit);
2898             }
2899             value.adopt(scratch);
2900             valueGPR = scratchReg;
2901             break;
2902         }
2903 #endif // USE(JSVALUE64)
2904             
2905         case DoubleRepUse: {
2906             if (isClamped(type)) {
2907                 ASSERT(elementSize(type) == 1);
2908                 SpeculateDoubleOperand valueOp(this, valueUse);
2909                 GPRTemporary result(this);
2910                 FPRTemporary floatScratch(this);
2911                 FPRReg fpr = valueOp.fpr();
2912                 GPRReg gpr = result.gpr();
2913                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2914                 value.adopt(result);
2915                 valueGPR = gpr;
2916             } else {
2917 #if USE(JSVALUE32_64)
2918                 GPRTemporary realPropertyTag(this);
2919                 propertyTag.adopt(realPropertyTag);
2920                 propertyTagGPR = propertyTag.gpr();
2921
2922                 GPRTemporary realValueTag(this);
2923                 valueTag.adopt(realValueTag);
2924                 valueTagGPR = valueTag.gpr();
2925 #endif
2926                 SpeculateDoubleOperand valueOp(this, valueUse);
2927                 GPRTemporary result(this);
2928                 FPRReg fpr = valueOp.fpr();
2929                 GPRReg gpr = result.gpr();
2930                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2931                 m_jit.xorPtr(gpr, gpr);
2932                 MacroAssembler::JumpList fixed(m_jit.jump());
2933                 notNaN.link(&m_jit);
2934
2935                 fixed.append(m_jit.branchTruncateDoubleToInt32(
2936                     fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
2937
2938 #if USE(JSVALUE64)
2939                 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
2940                 boxDouble(fpr, gpr);
2941 #else
2942                 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
2943                 boxDouble(fpr, valueTagGPR, gpr);
2944 #endif
2945                 slowPathCases.append(m_jit.jump());
2946
2947                 fixed.link(&m_jit);
2948                 value.adopt(result);
2949                 valueGPR = gpr;
2950             }
2951             break;
2952         }
2953             
2954         default:
2955             RELEASE_ASSERT_NOT_REACHED();
2956             break;
2957         }
2958     }
2959     
2960     ASSERT_UNUSED(valueGPR, valueGPR != property);
2961     ASSERT(valueGPR != base);
2962     ASSERT(valueGPR != storageReg);
2963     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2964
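    // The store below runs only on the in-bounds path; the outOfBounds jump bypasses it
    // and is resolved by jumpForTypedArrayIsNeuteredIfOutOfBounds after the store. Under
    // an in-bounds array mode an out-of-bounds write is an OSR exit; otherwise it is
    // dropped, after checking that the view has not been neutered.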
2965     switch (elementSize(type)) {
2966     case 1:
2967         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2968         break;
2969     case 2:
2970         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2971         break;
2972     case 4:
2973         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2974         break;
2975     default:
2976         CRASH();
2977     }
2978
2979     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2980     if (done.isSet())
2981         done.link(&m_jit);
2982
2983     if (!slowPathCases.empty()) {
2984 #if USE(JSVALUE64)
2985         if (node->op() == PutByValDirect) {
2986             addSlowPathGenerator(slowPathCall(
2987                 slowPathCases, this,
2988                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
2989                 NoResult, base, property, valueGPR));
2990         } else {
2991             addSlowPathGenerator(slowPathCall(
2992                 slowPathCases, this,
2993                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
2994                 NoResult, base, property, valueGPR));
2995         }
2996 #else // not USE(JSVALUE64)
2997         if (node->op() == PutByValDirect) {
2998             addSlowPathGenerator(slowPathCall(
2999                 slowPathCases, this,
3000                 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
3001                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3002         } else {
3003             addSlowPathGenerator(slowPathCall(
3004                 slowPathCases, this,
3005                 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
3006                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3007         }
3008 #endif
3009     }
3010     noResult(node);
3011 }
3012
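// Loads from a Float32 or Float64 typed array. Float32 elements are widened to double
// right after the load, so the result is always a double.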
3013 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3014 {
3015     ASSERT(isFloat(type));
3016     
3017     SpeculateCellOperand base(this, node->child1());
3018     SpeculateStrictInt32Operand property(this, node->child2());
3019     StorageOperand storage(this, node->child3());
3020
3021     GPRReg baseReg = base.gpr();
3022     GPRReg propertyReg = property.gpr();
3023     GPRReg storageReg = storage.gpr();
3024
3025     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
3026
3027     FPRTemporary result(this);
3028     FPRReg resultReg = result.fpr();
3029     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3030     switch (elementSize(type)) {
3031     case 4:
3032         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3033         m_jit.convertFloatToDouble(resultReg, resultReg);
3034         break;
3035     case 8: {
3036         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3037         break;
3038     }
3039     default:
3040         RELEASE_ASSERT_NOT_REACHED();
3041     }
3042     
3043     doubleResult(resultReg, node);
3044 }
3045
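// Stores into a Float32 or Float64 typed array. The value is already a double; for
// Float32 arrays it is narrowed through a scratch FPR before the store. Bounds and
// neutering are handled exactly as in the integer store above.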
3046 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3047 {
3048     ASSERT(isFloat(type));
3049     
3050     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3051     GPRReg storageReg = storage.gpr();
3052     
3053     Edge baseUse = m_jit.graph().varArgChild(node, 0);
3054     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3055
3056     SpeculateDoubleOperand valueOp(this, valueUse);
3057     FPRTemporary scratch(this);
3058     FPRReg valueFPR = valueOp.fpr();
3059     FPRReg scratchFPR = scratch.fpr();
3060
3061     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3062     
3063     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3064     
3065     switch (elementSize(type)) {
3066     case 4: {
3067         m_jit.moveDouble(valueFPR, scratchFPR);
3068         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3069         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3070         break;
3071     }
3072     case 8:
3073         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3074         break;
3075     default:
3076         RELEASE_ASSERT_NOT_REACHED();
3077     }
3078
3079     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3080     if (done.isSet())
3081         done.link(&m_jit);
3082     noResult(node);
3083 }
3084
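// Fast path for `value instanceof prototype` once both operands are known cells: walk the
// value's prototype chain, comparing each prototype against prototypeReg. Proxy objects
// drop to operationDefaultHasInstance, since their prototype cannot be read off the
// structure. The boolean result, boxed appropriately for the value representation, is
// left in scratchReg.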
3085 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
3086 {
3087     // Check that prototype is an object.
3088     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3089     
3090     // Initialize scratchReg with the value being checked.
3091     m_jit.move(valueReg, scratchReg);
3092     
3093     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3094     MacroAssembler::Label loop(&m_jit);
3095     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3096         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3097     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
3098     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
3099     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3100 #if USE(JSVALUE64)
3101     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3102 #else
3103     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3104 #endif
3105     
3106     // No match - result is false.
3107 #if USE(JSVALUE64)
3108     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
3109 #else
3110     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
3111 #endif
3112     MacroAssembler::JumpList doneJumps; 
3113     doneJumps.append(m_jit.jump());
3114
3115     performDefaultHasInstance.link(&m_jit);
3116     silentSpillAllRegisters(scratchReg);
3117     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
3118     silentFillAllRegisters(scratchReg);
3119     m_jit.exceptionCheck();
3120 #if USE(JSVALUE64)
3121     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
3122 #endif
3123     doneJumps.append(m_jit.jump());
3124     
3125     isInstance.link(&m_jit);
3126 #if USE(JSVALUE64)
3127     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
3128 #else
3129     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
3130 #endif
3131     
3132     doneJumps.link(&m_jit);
3133 }
3134
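// Speculates on the base cell's type-info flags: if testing them against the node's
// requested mask comes up zero, we OSR exit with BadTypeInfoFlags.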
3135 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
3136 {
3137     SpeculateCellOperand base(this, node->child1());
3138
3139     GPRReg baseGPR = base.gpr();
3140
3141     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
3142
3143     noResult(node);
3144 }
3145
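// InstanceOf with an UntypedUse left-hand side first filters out non-cells, which are
// never instances, and only then falls into the object path; with a CellUse both operands
// go straight to compileInstanceOfForObject.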
3146 void SpeculativeJIT::compileInstanceOf(Node* node)
3147 {
3148     if (node->child1().useKind() == UntypedUse) {
3149         // It might not be a cell. Speculate less aggressively.
3150         // Or: it might only be used once (i.e. by us), so we get zero benefit
3151         // from speculating any more aggressively than we absolutely need to.
3152         
3153         JSValueOperand value(this, node->child1());
3154         SpeculateCellOperand prototype(this, node->child2());
3155         GPRTemporary scratch(this);
3156         GPRTemporary scratch2(this);
3157         
3158         GPRReg prototypeReg = prototype.gpr();
3159         GPRReg scratchReg = scratch.gpr();
3160         GPRReg scratch2Reg = scratch2.gpr();
3161         
3162         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
3163         GPRReg valueReg = value.jsValueRegs().payloadGPR();
3164         moveFalseTo(scratchReg);
3165
3166         MacroAssembler::Jump done = m_jit.jump();
3167         
3168         isCell.link(&m_jit);
3169         
3170         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3171         
3172         done.link(&m_jit);
3173
3174         blessedBooleanResult(scratchReg, node);
3175         return;
3176     }
3177     
3178     SpeculateCellOperand value(this, node->child1());
3179     SpeculateCellOperand prototype(this, node->child2());
3180     
3181     GPRTemporary scratch(this);
3182     GPRTemporary scratch2(this);
3183     
3184     GPRReg valueReg = value.gpr();
3185     GPRReg prototypeReg = prototype.gpr();
3186     GPRReg scratchReg = scratch.gpr();
3187     GPRReg scratch2Reg = scratch2.gpr();
3188     
3189     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3190
3191     blessedBooleanResult(scratchReg, node);
3192 }
3193
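// Generic path for &, | and ^ when at least one operand is UntypedUse. If either operand
// is known not to be a number we skip the snippet entirely and call the slow-path
// operation; otherwise the snippet generator emits an inline int32 fast path, with the
// slow path re-materializing any constant operand before making the call.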
3194 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3195 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3196 {
3197     Edge& leftChild = node->child1();
3198     Edge& rightChild = node->child2();
3199
3200     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3201         JSValueOperand left(this, leftChild);
3202         JSValueOperand right(this, rightChild);
3203         JSValueRegs leftRegs = left.jsValueRegs();
3204         JSValueRegs rightRegs = right.jsValueRegs();
3205 #if USE(JSVALUE64)
3206         GPRTemporary result(this);
3207         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3208 #else
3209         GPRTemporary resultTag(this);
3210         GPRTemporary resultPayload(this);
3211         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3212 #endif
3213         flushRegisters();
3214         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3215         m_jit.exceptionCheck();
3216
3217         jsValueResult(resultRegs, node);
3218         return;
3219     }
3220
3221     Optional<JSValueOperand> left;
3222     Optional<JSValueOperand> right;
3223
3224     JSValueRegs leftRegs;
3225     JSValueRegs rightRegs;
3226
3227 #if USE(JSVALUE64)
3228     GPRTemporary result(this);
3229     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3230     GPRTemporary scratch(this);
3231     GPRReg scratchGPR = scratch.gpr();
3232 #else
3233     GPRTemporary resultTag(this);
3234     GPRTemporary resultPayload(this);
3235     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3236     GPRReg scratchGPR = resultTag.gpr();
3237 #endif
3238
3239     SnippetOperand leftOperand;
3240     SnippetOperand rightOperand;
3241
3242     // The snippet generator does not support both operands being constant. If the left
3243     // operand is already const, we'll ignore the right operand's constness.
3244     if (leftChild->isInt32Constant())
3245         leftOperand.setConstInt32(leftChild->asInt32());
3246     else if (rightChild->isInt32Constant())
3247         rightOperand.setConstInt32(rightChild->asInt32());
3248
3249     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3250
3251     if (!leftOperand.isConst()) {
3252         left = JSValueOperand(this, leftChild);
3253         leftRegs = left->jsValueRegs();
3254     }
3255     if (!rightOperand.isConst()) {
3256         right = JSValueOperand(this, rightChild);
3257         rightRegs = right->jsValueRegs();
3258     }
3259
3260     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3261     gen.generateFastPath(m_jit);
3262
3263     ASSERT(gen.didEmitFastPath());
3264     gen.endJumpList().append(m_jit.jump());
3265
3266     gen.slowPathJumpList().link(&m_jit);
3267     silentSpillAllRegisters(resultRegs);
3268
3269     if (leftOperand.isConst()) {
3270         leftRegs = resultRegs;
3271         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3272     } else if (rightOperand.isConst()) {
3273         rightRegs = resultRegs;
3274         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3275     }
3276
3277     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3278
3279     silentFillAllRegisters(resultRegs);
3280     m_jit.exceptionCheck();
3281
3282     gen.endJumpList().link(&m_jit);
3283     jsValueResult(resultRegs, node);
3284 }
3285
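// Int32 bitwise ops. Untyped operands are routed to the snippet-based helper above;
// otherwise a constant operand is folded directly into the instruction via bitOp().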
3286 void SpeculativeJIT::compileBitwiseOp(Node* node)
3287 {
3288     NodeType op = node->op();
3289     Edge& leftChild = node->child1();
3290     Edge& rightChild = node->child2();
3291
3292     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3293         switch (op) {
3294         case BitAnd:
3295             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3296             return;
3297         case BitOr:
3298             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3299             return;
3300         case BitXor:
3301             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3302             return;
3303         default:
3304             RELEASE_ASSERT_NOT_REACHED();
3305         }
3306     }
3307
3308     if (leftChild->isInt32Constant()) {
3309         SpeculateInt32Operand op2(this, rightChild);
3310         GPRTemporary result(this, Reuse, op2);
3311
3312         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3313
3314         int32Result(result.gpr(), node);
3315
3316     } else if (rightChild->isInt32Constant()) {
3317         SpeculateInt32Operand op1(this, leftChild);
3318         GPRTemporary result(this, Reuse, op1);
3319
3320         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3321
3322         int32Result(result.gpr(), node);
3323
3324     } else {
3325         SpeculateInt32Operand op1(this, leftChild);
3326         SpeculateInt32Operand op2(this, rightChild);
3327         GPRTemporary result(this, Reuse, op1, op2);
3328         
3329         GPRReg reg1 = op1.gpr();
3330         GPRReg reg2 = op2.gpr();
3331         bitOp(op, reg1, reg2, result.gpr());
3332         
3333         int32Result(result.gpr(), node);
3334     }
3335 }
3336
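// Untyped >> and >>>. Structured like emitUntypedBitOp, but the right-shift snippet also
// takes an FPR (plus a scratch FPR on 32-bit), presumably so it can truncate a double
// left operand on the fast path.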
3337 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3338 {
3339     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3340         ? operationValueBitRShift : operationValueBitURShift;
3341     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3342         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3343
3344     Edge& leftChild = node->child1();
3345     Edge& rightChild = node->child2();
3346
3347     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3348         JSValueOperand left(this, leftChild);
3349         JSValueOperand right(this, rightChild);
3350         JSValueRegs leftRegs = left.jsValueRegs();
3351         JSValueRegs rightRegs = right.jsValueRegs();
3352 #if USE(JSVALUE64)
3353         GPRTemporary result(this);
3354         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3355 #else
3356         GPRTemporary resultTag(this);
3357         GPRTemporary resultPayload(this);
3358         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3359 #endif
3360         flushRegisters();
3361         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3362         m_jit.exceptionCheck();
3363
3364         jsValueResult(resultRegs, node);
3365         return;
3366     }
3367
3368     Optional<JSValueOperand> left;
3369     Optional<JSValueOperand> right;
3370
3371     JSValueRegs leftRegs;
3372     JSValueRegs rightRegs;
3373
3374     FPRTemporary leftNumber(this);
3375     FPRReg leftFPR = leftNumber.fpr();
3376
3377 #if USE(JSVALUE64)
3378     GPRTemporary result(this);
3379     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3380     GPRTemporary scratch(this);
3381     GPRReg scratchGPR = scratch.gpr();
3382     FPRReg scratchFPR = InvalidFPRReg;
3383 #else
3384     GPRTemporary resultTag(this);
3385     GPRTemporary resultPayload(this);
3386     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3387     GPRReg scratchGPR = resultTag.gpr();
3388     FPRTemporary fprScratch(this);
3389     FPRReg scratchFPR = fprScratch.fpr();
3390 #endif
3391
3392     SnippetOperand leftOperand;
3393     SnippetOperand rightOperand;
3394
3395     // The snippet generator does not support both operands being constant. If the left
3396     // operand is already const, we'll ignore the right operand's constness.
3397     if (leftChild->isInt32Constant())
3398         leftOperand.setConstInt32(leftChild->asInt32());
3399     else if (rightChild->isInt32Constant())
3400         rightOperand.setConstInt32(rightChild->asInt32());
3401
3402     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3403
3404     if (!leftOperand.isConst()) {
3405         left = JSValueOperand(this, leftChild);
3406         leftRegs = left->jsValueRegs();
3407     }
3408     if (!rightOperand.isConst()) {
3409         right = JSValueOperand(this, rightChild);
3410         rightRegs = right->jsValueRegs();
3411     }
3412
3413     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3414         leftFPR, scratchGPR, scratchFPR, shiftType);
3415     gen.generateFastPath(m_jit);
3416
3417     ASSERT(gen.didEmitFastPath());
3418     gen.endJumpList().append(m_jit.jump());
3419
3420     gen.slowPathJumpList().link(&m_jit);
3421     silentSpillAllRegisters(resultRegs);
3422
3423     if (leftOperand.isConst()) {
3424         leftRegs = resultRegs;
3425         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3426     } else if (rightOperand.isConst()) {
3427         rightRegs = resultRegs;
3428         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3429     }
3430
3431     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3432
3433     silentFillAllRegisters(resultRegs);
3434     m_jit.exceptionCheck();
3435
3436     gen.endJumpList().link(&m_jit);
3437     jsValueResult(resultRegs, node);
3438     return;
3439 }
3440
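// Int32 shift ops (<<, >>, >>>). Untyped operands go through the snippet helpers above; a
// constant shift amount is masked to five bits at compile time, matching what the
// hardware shift would do with it.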
3441 void SpeculativeJIT::compileShiftOp(Node* node)
3442 {
3443     NodeType op = node->op();
3444     Edge& leftChild = node->child1();
3445     Edge& rightChild = node->child2();
3446
3447     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3448         switch (op) {
3449         case BitLShift:
3450             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3451             return;
3452         case BitRShift:
3453         case BitURShift:
3454             emitUntypedRightShiftBitOp(node);
3455             return;
3456         default:
3457             RELEASE_ASSERT_NOT_REACHED();
3458         }
3459     }
3460
3461     if (rightChild->isInt32Constant()) {
3462         SpeculateInt32Operand op1(this, leftChild);
3463         GPRTemporary result(this, Reuse, op1);
3464
3465         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3466
3467         int32Result(result.gpr(), node);
3468     } else {
3469         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3470         SpeculateInt32Operand op1(this, leftChild);
3471         SpeculateInt32Operand op2(this, rightChild);
3472         GPRTemporary result(this, Reuse, op1);
3473
3474         GPRReg reg1 = op1.gpr();
3475         GPRReg reg2 = op2.gpr();
3476         shiftOp(op, reg1, reg2, result.gpr());
3477
3478         int32Result(result.gpr(), node);
3479     }
3480 }
3481
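// ValueAdd on untyped operands. Operands known not to be numbers skip the math IC and
// call operationValueAddNotNumber directly; everything else goes through a JITAddIC built
// from the baseline ArithProfile, so the inline fast path can be repatched as type
// information accumulates.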
3482 void SpeculativeJIT::compileValueAdd(Node* node)
3483 {
3484     Edge& leftChild = node->child1();
3485     Edge& rightChild = node->child2();
3486
3487     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3488         JSValueOperand left(this, leftChild);
3489         JSValueOperand right(this, rightChild);
3490         JSValueRegs leftRegs = left.jsValueRegs();
3491         JSValueRegs rightRegs = right.jsValueRegs();
3492 #if USE(JSVALUE64)
3493         GPRTemporary result(this);
3494         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3495 #else
3496         GPRTemporary resultTag(this);
3497         GPRTemporary resultPayload(this);
3498         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3499 #endif
3500         flushRegisters();
3501         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3502         m_jit.exceptionCheck();
3503     
3504         jsValueResult(resultRegs, node);
3505         return;
3506     }
3507
3508 #if USE(JSVALUE64)
3509     bool needsScratchGPRReg = true;
3510     bool needsScratchFPRReg = false;
3511 #else
3512     bool needsScratchGPRReg = true;
3513     bool needsScratchFPRReg = true;
3514 #endif
3515
3516     ArithProfile* arithProfile = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
3517     JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile);
3518     auto repatchingFunction = operationValueAddOptimize;
3519     auto nonRepatchingFunction = operationValueAdd;
3520     
3521     compileMathIC(node, addIC, needsScratchGPRReg, needsScratchFPRReg, repatchingFunction, nonRepatchingFunction);
3522 }
3523
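// Shared driver for binary math ICs, such as the add IC built in compileValueAdd above.
// The IC's inline generator is handed the operand and result registers plus whichever
// scratch registers the caller asked for. If inline generation succeeds, the slow path
// spills, re-materializes any constant operand, and calls either the repatching or the
// non-repatching operation depending on the generation state; if inline generation fails
// we simply flush and call the non-repatching operation.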
3524 template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
3525 void SpeculativeJIT::compileMathIC(Node* node, JITBinaryMathIC<Generator>* mathIC, bool needsScratchGPRReg, bool needsScratchFPRReg, RepatchingFunction repatchingFunction, NonRepatchingFunction nonRepatchingFunction)
3526 {
3527     Edge& leftChild = node->child1();
3528     Edge& rightChild = node->child2();
3529
3530     Optional<JSValueOperand> left;
3531     Optional<JSValueOperand> right;
3532
3533     JSValueRegs leftRegs;
3534     JSValueRegs rightRegs;
3535
3536     FPRTemporary leftNumber(this);
3537     FPRTemporary rightNumber(this);
3538     FPRReg leftFPR = leftNumber.fpr();
3539     FPRReg rightFPR = rightNumber.fpr();
3540
3541     GPRReg scratchGPR = InvalidGPRReg;
3542     FPRReg scratchFPR = InvalidFPRReg;
3543
3544     Optional<FPRTemporary> fprScratch;
3545     if (needsScratchFPRReg) {
3546         fprScratch = FPRTemporary(this);
3547         scratchFPR = fprScratch->fpr();
3548     }
3549
3550 #if USE(JSVALUE64)
3551     Optional<GPRTemporary> gprScratch;
3552     if (needsScratchGPRReg) {
3553         gprScratch = GPRTemporary(this);
3554         scratchGPR = gprScratch->gpr();
3555     }
3556     GPRTemporary result(this);
3557     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3558 #else
3559     GPRTemporary resultTag(this);
3560     GPRTemporary resultPayload(this);
3561     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3562     if (needsScratchGPRReg)
3563         scratchGPR = resultRegs.tagGPR();
3564 #endif
3565
3566     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3567     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3568
3569     // The snippet generator does not support both operands being constant. If the left
3570     // operand is already const, we'll ignore the right operand's constness.
3571     if (leftChild->isInt32Constant())
3572         leftOperand.setConstInt32(leftChild->asInt32());
3573     else if (rightChild->isInt32Constant())
3574         rightOperand.setConstInt32(rightChild->asInt32());
3575
3576     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3577     ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));
3578
3579     if (!Generator::isLeftOperandValidConstant(leftOperand)) {
3580         left = JSValueOperand(this, leftChild);
3581         leftRegs = left->jsValueRegs();
3582     }
3583     if (!Generator::isRightOperandValidConstant(rightOperand)) {
3584         right = JSValueOperand(this, rightChild);
3585         rightRegs = right->jsValueRegs();
3586     }
3587
3588 #if ENABLE(MATH_IC_STATS)
3589     auto inlineStart = m_jit.label();
3590 #endif
3591
3592     Box<MathICGenerationState> addICGenerationState = Box<MathICGenerationState>::create();
3593     mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, leftFPR, rightFPR, scratchGPR, scratchFPR);
3594
3595     bool shouldEmitProfiling = false;
3596     bool generatedInline = mathIC->generateInline(m_jit, *addICGenerationState, shouldEmitProfiling);
3597
3598     if (generatedInline) {
3599         ASSERT(!addICGenerationState->slowPathJumps.empty());
3600
3601         Vector<SilentRegisterSavePlan> savePlans;
3602         silentSpillAllRegistersImpl(false, savePlans, resultRegs);
3603
3604         auto done = m_jit.label();
3605
3606         addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
3607             addICGenerationState->slowPathJumps.link(&m_jit);
3608             addICGenerationState->slowPathStart = m_jit.label();
3609 #if ENABLE(MATH_IC_STATS)
3610             auto slowPathStart = m_jit.label();
3611 #endif
3612
3613             silentSpill(savePlans);
3614
3615             auto innerLeftRegs = leftRegs;
3616             auto innerRightRegs = rightRegs;
3617             if (Generator::isLeftOperandValidConstant(leftOperand)) {
3618                 innerLeftRegs = resultRegs;
3619                 m_jit.moveValue(leftChild->asJSValue(), innerLeftRegs);
3620             } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3621                 innerRightRegs = resultRegs;
3622                 m_jit.moveValue(rightChild->asJSValue(), innerRightRegs);
3623             }
3624
3625             if (addICGenerationState->shouldSlowPathRepatch)
3626                 addICGenerationState->slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJMic>(repatchingFunction), resultRegs, innerLeftRegs, innerRightRegs, TrustedImmPtr(mathIC));
3627             else
3628                 addICGenerationState->slowPathCall = callOperation(nonRepatchingFunction, resultRegs, innerLeftRegs, innerRightRegs);
3629
3630             silentFill(savePlans);
3631             m_jit.exceptionCheck();
3632             m_jit.jump().linkTo(done, &m_jit);
3633
3634             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3635                 mathIC->finalizeInlineCode(*addICGenerationState, linkBuffer);
3636             });
3637
3638 #if ENABLE(MATH_IC_STATS)
3639             auto slowPathEnd = m_jit.label();
3640             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3641                 size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
3642                 mathIC->m_generatedCodeSize += size;
3643             });
3644 #endif
3645
3646         });
3647     } else {
3648         if (Generator::isLeftOperandValidConstant(leftOperand)) {
3649             left = JSValueOperand(this, leftChild);
3650             leftRegs = left->jsValueRegs();
3651         } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3652             right = JSValueOperand(this, rightChild);
3653             rightRegs = right->jsValueRegs();
3654         }
3655
3656         flushRegisters();
3657         callOperation(nonRepatchingFunction, resultRegs, leftRegs, rightRegs);
3658         m_jit.exceptionCheck();
3659     }
3660
3661 #if ENABLE(MATH_IC_STATS)
3662     auto inlineEnd = m_jit.label();
3663     m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3664         size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
3665         mathIC->m_generatedCodeSize += size;
3666     });
3667 #endif
3668
3669     jsValueResult(resultRegs, node);
3670     return;
3671 }
3672
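// InstanceOfCustom (a constructor with a custom Symbol.hasInstance) has no inline fast
// path at all: the unconditional jump sends every execution to operationInstanceOfCustom
// via the slow-path generator, and only the boolean result handling stays inline.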
3673 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3674 {
3675     // We could do something smarter here, but this case is currently super rare and, unless
3676     // Symbol.hasInstance becomes popular, it will likely remain that way.
3677
3678     JSValueOperand value(this, node->child1());
3679     SpeculateCellOperand constructor(this, node->child2());
3680     JSValueOperand hasInstanceValue(this, node->child3());
3681     GPRTemporary result(this);
3682
3683     JSValueRegs valueRegs = value.jsValueRegs();
3684     GPRReg constructorGPR = constructor.gpr();
3685     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3686     GPRReg resultGPR = result.gpr();
3687
3688     MacroAssembler::Jump slowCase = m_jit.jump();
3689
3690     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3691
3692     unblessedBooleanResult(resultGPR, node);
3693 }
3694
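// IsCellWithType: compare the cell's JSType (at typeInfoTypeOffset) against
// node->queriedType(). The UntypedUse path produces false for non-cells; the CellUse path
// skips that check.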
3695 void SpeculativeJIT::compileIsCellWithType(Node* node)
3696 {
3697     switch (node->child1().useKind()) {
3698     case UntypedUse: {
3699         JSValueOperand value(this, node->child1());
3700 #if USE(JSVALUE64)
3701         GPRTemporary result(this, Reuse, value);
3702 #else
3703         GPRTemporary result(this, Reuse, value, PayloadWord);
3704 #endif
3705
3706         JSValueRegs valueRegs = value.jsValueRegs();
3707         GPRReg resultGPR = result.gpr();
3708
3709         JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3710
3711         m_jit.compare8(JITCompiler::Equal,
3712             JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3713             TrustedImm32(node->queriedType()),
3714             resultGPR);
3715         blessBoolean(resultGPR);
3716         JITCompiler::Jump done = m_jit.jump();
3717
3718         isNotCell.link(&m_jit);
3719         moveFalseTo(resultGPR);
3720
3721         done.link(&m_jit);
3722         blessedBooleanResult(resultGPR, node);
3723         return;
3724     }
3725
3726     case CellUse: {
3727         SpeculateCellOperand cell(this, node->child1());
3728         GPRTemporary result(this, Reuse, cell);
3729
3730         GPRReg cellGPR = cell.gpr();
3731         GPRReg resultGPR = result.gpr();
3732
3733         m_jit.compare8(JITCompiler::Equal,
3734             JITCompiler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
3735             TrustedImm32(node->queriedType()),
3736             resultGPR);
3737         blessBoolean(resultGPR);
3738         blessedBooleanResult(resultGPR, node);
3739         return;
3740     }
3741
3742     default:
3743         RELEASE_ASSERT_NOT_REACHED();
3744         break;
3745     }
3746 }
3747
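// IsTypedArrayView: a cell is a typed-array view iff its JSType falls in the contiguous
// range [Int8ArrayType, Float64ArrayType], so the check is a subtract followed by an
// unsigned compare. Non-cells are false.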
3748 void SpeculativeJIT::compileIsTypedArrayView(Node* node)
3749 {
3750     JSValueOperand value(this, node->child1());
3751 #if USE(JSVALUE64)
3752     GPRTemporary result(this, Reuse, value);
3753 #else
3754     GPRTemporary result(this, Reuse, value, PayloadWord);
3755 #endif
3756
3757     JSValueRegs valueRegs = value.jsValueRegs();
3758     GPRReg resultGPR = result.gpr();
3759
3760     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3761
3762     m_jit.load8(JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()), resultGPR);
3763     m_jit.sub32(TrustedImm32(Int8ArrayType), resultGPR);
3764     m_jit.compare32(JITCompiler::BelowOrEqual,
3765         resultGPR,
3766         TrustedImm32(Float64ArrayType - Int8ArrayType),
3767         resultGPR);
3768     blessBoolean(resultGPR);
3769     JITCompiler::Jump done = m_jit.jump();
3770
3771     isNotCell.link(&m_jit);
3772     moveFalseTo(resultGPR);
3773
3774     done.link(&m_jit);
3775     blessedBooleanResult(resultGPR, node);
3776 }
3777
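// CallObjectConstructor, i.e. Object(value): objects pass through unchanged on the fast
// path; non-cells and non-object cells are handed to operationObjectConstructor, which
// performs the equivalent of ToObject.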
3778 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3779 {
3780     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3781     JSValueOperand value(this, node->child1());
3782 #if USE(JSVALUE64)
3783     GPRTemporary result(this, Reuse, value);
3784 #else
3785     GPRTemporary result(this, Reuse, value, PayloadWord);
3786 #endif
3787
3788     JSValueRegs valueRegs = value.jsValueRegs();
3789     GPRReg resultGPR = result.gpr();
3790
3791     MacroAssembler::JumpList slowCases;
3792     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3793     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3794     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3795
3796     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3797     cellResult(resultGPR, node);
3798 }
3799
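// Typed ArithAdd. For Int32Use, a constant right operand folds into an immediate add;
// overflow checks are emitted only when the arith mode requires them, with a
// SpeculationRecovery recorded when the result register aliases an operand so OSR exit
// can reconstruct the original value. For Int52RepUse, the checked branchAdd64 is used
// only when one of the inputs might actually need more than 32 bits.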
3800 void SpeculativeJIT::compileArithAdd(Node* node)
3801 {
3802     switch (node->binaryUseKind()) {
3803     case Int32Use: {
3804         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3805
3806         if (node->child2()->isInt32Constant()) {
3807             SpeculateInt32Operand op1(this, node->child1());
3808             GPRTemporary result(this, Reuse, op1);
3809
3810             GPRReg gpr1 = op1.gpr();
3811             int32_t imm2 = node->child2()->asInt32();
3812             GPRReg gprResult = result.gpr();
3813
3814             if (!shouldCheckOverflow(node->arithMode())) {
3815                 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3816                 int32Result(gprResult, node);
3817                 return;
3818             }
3819
3820             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
3821             if (gpr1 == gprResult) {
3822                 speculationCheck(Overflow, JSValueRegs(), 0, check,
3823                     SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
3824             } else
3825                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3826
3827             int32Result(gprResult, node);
3828             return;
3829         }
3830                 
3831         SpeculateInt32Operand op1(this, node->child1());
3832         SpeculateInt32Operand op2(this, node->child2());
3833         GPRTemporary result(this, Reuse, op1, op2);
3834
3835         GPRReg gpr1 = op1.gpr();
3836         GPRReg gpr2 = op2.gpr();
3837         GPRReg gprResult = result.gpr();
3838
3839         if (!shouldCheckOverflow(node->arithMode()))
3840             m_jit.add32(gpr1, gpr2, gprResult);
3841         else {
3842             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3843                 
3844             if (gpr1 == gprResult)
3845                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3846             else if (gpr2 == gprResult)
3847                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3848             else
3849                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3850         }
3851
3852         int32Result(gprResult, node);
3853         return;
3854     }
3855         
3856 #if USE(JSVALUE64)
3857     case Int52RepUse: {
3858         ASSERT(shouldCheckOverflow(node->arithMode()));
3859         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3860
3861         // Will we need an overflow check? If we can prove that neither input can be
3862         // Int52, then the overflow check will not be necessary.
3863         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3864             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3865             SpeculateWhicheverInt52Operand op1(this, node->child1());
3866             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3867             GPRTemporary result(this, Reuse, op1);
3868             m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
3869             int52Result(result.gpr(), node, op1.format());
3870             return;
3871         }
3872         
3873         SpeculateInt52Operand op1(this, node->child1());
3874         SpeculateInt52Operand op2(this, node->child2());
3875         GPRTemporary result(this);
3876         m_jit.move(op1.gpr(), result.gpr());
3877         speculationCheck(
3878             Int52Overflow, JSValueRegs(), 0,
3879             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3880         int52Result(result.gpr(), node);
3881         return;
3882     }
3883 #endif // USE(JSVALUE64)
3884     
3885     case DoubleRepUse: {
3886         SpeculateDoubleOperand op1(this, node->child1());
3887         SpeculateDoubleOperand op2(this, node->child2());