1 /*
2  * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGCapabilities.h"
37 #include "DFGDOMJITPatchpointParams.h"
38 #include "DFGMayExit.h"
39 #include "DFGOSRExitFuzz.h"
40 #include "DFGSaneStringGetByValSlowPathGenerator.h"
41 #include "DFGSlowPathGenerator.h"
42 #include "DOMJITPatchpoint.h"
43 #include "DirectArguments.h"
44 #include "JITAddGenerator.h"
45 #include "JITBitAndGenerator.h"
46 #include "JITBitOrGenerator.h"
47 #include "JITBitXorGenerator.h"
48 #include "JITDivGenerator.h"
49 #include "JITLeftShiftGenerator.h"
50 #include "JITMulGenerator.h"
51 #include "JITRightShiftGenerator.h"
52 #include "JITSubGenerator.h"
53 #include "JSAsyncFunction.h"
54 #include "JSCInlines.h"
55 #include "JSEnvironmentRecord.h"
56 #include "JSFixedArray.h"
57 #include "JSGeneratorFunction.h"
58 #include "JSLexicalEnvironment.h"
59 #include "LinkBuffer.h"
60 #include "RegExpConstructor.h"
61 #include "ScopedArguments.h"
62 #include "ScratchRegisterAllocator.h"
63 #include <wtf/BitVector.h>
64 #include <wtf/Box.h>
65 #include <wtf/MathExtras.h>
66
67 namespace JSC { namespace DFG {
68
69 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
70     : m_compileOkay(true)
71     , m_jit(jit)
72     , m_currentNode(0)
73     , m_lastGeneratedNode(LastNodeType)
74     , m_indexInBlock(0)
75     , m_generationInfo(m_jit.graph().frameRegisterCount())
76     , m_state(m_jit.graph())
77     , m_interpreter(m_jit.graph(), m_state)
78     , m_stream(&jit.jitCode()->variableEventStream)
79     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
80 {
81 }
82
83 SpeculativeJIT::~SpeculativeJIT()
84 {
85 }
86
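// Inline-allocates a JSFinalObject together with its butterfly (out-of-line
// property storage plus, when the structure has indexed properties, an
// indexing header and vector), falling back to a CallArrayAllocatorSlowPathGenerator
// (operationNewRawObject) when inline allocation is not possible or fails.
// Unused vector slots are pre-filled with PNaN (double shapes) or the empty
// JSValue so they hold well-defined values.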
87 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
88 {
89     IndexingType indexingType = structure->indexingType();
90     bool hasIndexingHeader = hasIndexedProperties(indexingType);
91
92     unsigned inlineCapacity = structure->inlineCapacity();
93     unsigned outOfLineCapacity = structure->outOfLineCapacity();
94     
95     GPRTemporary scratch(this);
96     GPRTemporary scratch2(this);
97     GPRReg scratchGPR = scratch.gpr();
98     GPRReg scratch2GPR = scratch2.gpr();
99
100     ASSERT(vectorLength >= numElements);
101     vectorLength = Butterfly::optimalContiguousVectorLength(structure.get(), vectorLength);
102     
103     JITCompiler::JumpList slowCases;
104
105     size_t size = 0;
106     if (hasIndexingHeader)
107         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
108     size += outOfLineCapacity * sizeof(JSValue);
109
110     m_jit.move(TrustedImmPtr(0), storageGPR);
111     
112     if (size) {
113         if (MarkedAllocator* allocator = m_jit.vm()->auxiliarySpace.allocatorFor(size)) {
114             m_jit.move(TrustedImmPtr(allocator), scratchGPR);
115             m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);
116             
117             m_jit.addPtr(
118                 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
119                 storageGPR);
120             
121             if (hasIndexingHeader)
122                 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
123         } else
124             slowCases.append(m_jit.jump());
125     }
126
127     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
128     MarkedAllocator* allocatorPtr = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorFor(allocationSize);
129     if (allocatorPtr) {
130         m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
131         emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
132         m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
133     } else
134         slowCases.append(m_jit.jump());
135
136     // I want a slow path that also loads out the storage pointer, and that's
137     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
138     // of work for a very small piece of functionality. :-/
139     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
140         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
141         structure, vectorLength));
142
143     if (numElements < vectorLength) {
144 #if USE(JSVALUE64)
145         if (hasDouble(structure->indexingType()))
146             m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
147         else
148             m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
149         for (unsigned i = numElements; i < vectorLength; ++i)
150             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
151 #else
152         EncodedValueDescriptor value;
153         if (hasDouble(structure->indexingType()))
154             value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
155         else
156             value.asInt64 = JSValue::encode(JSValue());
157         for (unsigned i = numElements; i < vectorLength; ++i) {
158             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
159             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
160         }
161 #endif
162     }
163     
164     if (hasIndexingHeader)
165         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
166     
167     m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());
168     
169     m_jit.mutatorFence(*m_jit.vm());
170 }
171
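// Loads the argument count for the given (possibly inlined) call frame. For a
// non-varargs inline frame the count is a compile-time constant; otherwise it
// is read from the frame's argument-count slot. includeThis controls whether
// the `this` argument is counted.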
172 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
173 {
174     if (inlineCallFrame && !inlineCallFrame->isVarargs())
175         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
176     else {
177         VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
178         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
179         if (!includeThis)
180             m_jit.sub32(TrustedImm32(1), lengthGPR);
181     }
182 }
183
184 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
185 {
186     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
187 }
188
189 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
190 {
191     if (origin.inlineCallFrame) {
192         if (origin.inlineCallFrame->isClosureCall) {
193             m_jit.loadPtr(
194                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
195                 calleeGPR);
196         } else {
197             m_jit.move(
198                 TrustedImmPtr::weakPointer(m_jit.graph(), origin.inlineCallFrame->calleeRecovery.constant().asCell()),
199                 calleeGPR);
200         }
201     } else
202         m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
203 }
204
205 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
206 {
207     m_jit.addPtr(
208         TrustedImm32(
209             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
210         GPRInfo::callFrameRegister, startGPR);
211 }
212
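// When OSR exit fuzzing is enabled, bumps the global check counter and returns
// a jump that fires once the configured fireOSRExitFuzzAt / fireOSRExitFuzzAtOrAfter
// threshold is hit; speculationCheck() folds that jump into the exit's jump
// list. Returns an unset Jump when fuzzing does not apply here.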
213 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
214 {
215     if (!Options::useOSRExitFuzz()
216         || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
217         || !doOSRExitFuzzing())
218         return MacroAssembler::Jump();
219     
220     MacroAssembler::Jump result;
221     
222     m_jit.pushToSave(GPRInfo::regT0);
223     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
224     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
225     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
226     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
227     unsigned at = Options::fireOSRExitFuzzAt();
228     if (at || atOrAfter) {
229         unsigned threshold;
230         MacroAssembler::RelationalCondition condition;
231         if (atOrAfter) {
232             threshold = atOrAfter;
233             condition = MacroAssembler::Below;
234         } else {
235             threshold = at;
236             condition = MacroAssembler::NotEqual;
237         }
238         MacroAssembler::Jump ok = m_jit.branch32(
239             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
240         m_jit.popToRestore(GPRInfo::regT0);
241         result = m_jit.jump();
242         ok.link(&m_jit);
243     }
244     m_jit.popToRestore(GPRInfo::regT0);
245     
246     return result;
247 }
248
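// The speculationCheck() overloads register an OSR exit for the current node:
// the supplied jump(s), plus an optional exit-fuzz jump, are recorded as the
// exit trigger together with the variable-event-stream index needed to
// reconstruct bytecode state at the exit.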
249 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
250 {
251     if (!m_compileOkay)
252         return;
253     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
254     if (fuzzJump.isSet()) {
255         JITCompiler::JumpList jumpsToFail;
256         jumpsToFail.append(fuzzJump);
257         jumpsToFail.append(jumpToFail);
258         m_jit.appendExitInfo(jumpsToFail);
259     } else
260         m_jit.appendExitInfo(jumpToFail);
261     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
262 }
263
264 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
265 {
266     if (!m_compileOkay)
267         return;
268     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
269     if (fuzzJump.isSet()) {
270         JITCompiler::JumpList myJumpsToFail;
271         myJumpsToFail.append(jumpsToFail);
272         myJumpsToFail.append(fuzzJump);
273         m_jit.appendExitInfo(myJumpsToFail);
274     } else
275         m_jit.appendExitInfo(jumpsToFail);
276     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
277 }
278
279 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
280 {
281     if (!m_compileOkay)
282         return OSRExitJumpPlaceholder();
283     unsigned index = m_jit.jitCode()->osrExit.size();
284     m_jit.appendExitInfo();
285     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
286     return OSRExitJumpPlaceholder(index);
287 }
288
289 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
290 {
291     return speculationCheck(kind, jsValueSource, nodeUse.node());
292 }
293
294 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
295 {
296     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
297 }
298
299 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
300 {
301     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
302 }
303
304 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
305 {
306     if (!m_compileOkay)
307         return;
308     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
309     m_jit.appendExitInfo(jumpToFail);
310     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
311 }
312
313 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
314 {
315     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
316 }
317
318 void SpeculativeJIT::emitInvalidationPoint(Node* node)
319 {
320     if (!m_compileOkay)
321         return;
322     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
323     m_jit.jitCode()->appendOSRExit(OSRExit(
324         UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
325         this, m_stream->size()));
326     info.m_replacementSource = m_jit.watchpointLabel();
327     ASSERT(info.m_replacementSource.isSet());
328     noResult(node);
329 }
330
331 void SpeculativeJIT::unreachable(Node* node)
332 {
333     m_compileOkay = false;
334     m_jit.abortWithReason(DFGUnreachableNode, node->op());
335 }
336
337 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
338 {
339     if (!m_compileOkay)
340         return;
341     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
342     m_compileOkay = false;
343     if (verboseCompilationEnabled())
344         dataLog("Bailing compilation.\n");
345 }
346
347 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
348 {
349     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
350 }
351
352 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
353 {
354     ASSERT(needsTypeCheck(edge, typesPassedThrough));
355     m_interpreter.filter(edge, typesPassedThrough);
356     speculationCheck(exitKind, source, edge.node(), jumpToFail);
357 }
358
359 RegisterSet SpeculativeJIT::usedRegisters()
360 {
361     RegisterSet result;
362     
363     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
364         GPRReg gpr = GPRInfo::toRegister(i);
365         if (m_gprs.isInUse(gpr))
366             result.set(gpr);
367     }
368     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
369         FPRReg fpr = FPRInfo::toRegister(i);
370         if (m_fprs.isInUse(fpr))
371             result.set(fpr);
372     }
373     
374     result.merge(RegisterSet::stubUnavailableRegisters());
375     
376     return result;
377 }
378
379 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
380 {
381     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
382 }
383
384 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
385 {
386     m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
387 }
388
389 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
390 {
391     for (auto& slowPathGenerator : m_slowPathGenerators) {
392         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
393         slowPathGenerator->generate(this);
394     }
395     for (auto& slowPathLambda : m_slowPathLambdas) {
396         Node* currentNode = slowPathLambda.currentNode;
397         m_currentNode = currentNode;
398         m_outOfLineStreamIndex = slowPathLambda.streamIndex;
399         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
400         slowPathLambda.generator();
401         m_outOfLineStreamIndex = std::nullopt;
402     }
403 }
404
405 void SpeculativeJIT::clearGenerationInfo()
406 {
407     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
408         m_generationInfo[i] = GenerationInfo();
409     m_gprs = RegisterBank<GPRInfo>();
410     m_fprs = RegisterBank<FPRInfo>();
411 }
412
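// Builds the spill/fill plan used for silent spilling around calls: decides
// how the value currently held in `source` must be stored to the stack (if at
// all) and how it can be restored afterwards, based on its DataFormat.
// Constants are rematerialized on fill rather than stored and reloaded.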
413 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
414 {
415     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
416     Node* node = info.node();
417     DataFormat registerFormat = info.registerFormat();
418     ASSERT(registerFormat != DataFormatNone);
419     ASSERT(registerFormat != DataFormatDouble);
420         
421     SilentSpillAction spillAction;
422     SilentFillAction fillAction;
423         
424     if (!info.needsSpill())
425         spillAction = DoNothingForSpill;
426     else {
427 #if USE(JSVALUE64)
428         ASSERT(info.gpr() == source);
429         if (registerFormat == DataFormatInt32)
430             spillAction = Store32Payload;
431         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
432             spillAction = StorePtr;
433         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
434             spillAction = Store64;
435         else {
436             ASSERT(registerFormat & DataFormatJS);
437             spillAction = Store64;
438         }
439 #elif USE(JSVALUE32_64)
440         if (registerFormat & DataFormatJS) {
441             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
442             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
443         } else {
444             ASSERT(info.gpr() == source);
445             spillAction = Store32Payload;
446         }
447 #endif
448     }
449         
450     if (registerFormat == DataFormatInt32) {
451         ASSERT(info.gpr() == source);
452         ASSERT(isJSInt32(info.registerFormat()));
453         if (node->hasConstant()) {
454             ASSERT(node->isInt32Constant());
455             fillAction = SetInt32Constant;
456         } else
457             fillAction = Load32Payload;
458     } else if (registerFormat == DataFormatBoolean) {
459 #if USE(JSVALUE64)
460         RELEASE_ASSERT_NOT_REACHED();
461 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
462         fillAction = DoNothingForFill;
463 #endif
464 #elif USE(JSVALUE32_64)
465         ASSERT(info.gpr() == source);
466         if (node->hasConstant()) {
467             ASSERT(node->isBooleanConstant());
468             fillAction = SetBooleanConstant;
469         } else
470             fillAction = Load32Payload;
471 #endif
472     } else if (registerFormat == DataFormatCell) {
473         ASSERT(info.gpr() == source);
474         if (node->hasConstant()) {
475             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
476             node->asCell(); // To get the assertion.
477             fillAction = SetCellConstant;
478         } else {
479 #if USE(JSVALUE64)
480             fillAction = LoadPtr;
481 #else
482             fillAction = Load32Payload;
483 #endif
484         }
485     } else if (registerFormat == DataFormatStorage) {
486         ASSERT(info.gpr() == source);
487         fillAction = LoadPtr;
488     } else if (registerFormat == DataFormatInt52) {
489         if (node->hasConstant())
490             fillAction = SetInt52Constant;
491         else if (info.spillFormat() == DataFormatInt52)
492             fillAction = Load64;
493         else if (info.spillFormat() == DataFormatStrictInt52)
494             fillAction = Load64ShiftInt52Left;
495         else if (info.spillFormat() == DataFormatNone)
496             fillAction = Load64;
497         else {
498             RELEASE_ASSERT_NOT_REACHED();
499 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
500             fillAction = Load64; // Make GCC happy.
501 #endif
502         }
503     } else if (registerFormat == DataFormatStrictInt52) {
504         if (node->hasConstant())
505             fillAction = SetStrictInt52Constant;
506         else if (info.spillFormat() == DataFormatInt52)
507             fillAction = Load64ShiftInt52Right;
508         else if (info.spillFormat() == DataFormatStrictInt52)
509             fillAction = Load64;
510         else if (info.spillFormat() == DataFormatNone)
511             fillAction = Load64;
512         else {
513             RELEASE_ASSERT_NOT_REACHED();
514 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
515             fillAction = Load64; // Make GCC happy.
516 #endif
517         }
518     } else {
519         ASSERT(registerFormat & DataFormatJS);
520 #if USE(JSVALUE64)
521         ASSERT(info.gpr() == source);
522         if (node->hasConstant()) {
523             if (node->isCellConstant())
524                 fillAction = SetTrustedJSConstant;
525             else
526                 fillAction = SetJSConstant;
527         } else if (info.spillFormat() == DataFormatInt32) {
528             ASSERT(registerFormat == DataFormatJSInt32);
529             fillAction = Load32PayloadBoxInt;
530         } else
531             fillAction = Load64;
532 #else
533         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
534         if (node->hasConstant())
535             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
536         else if (info.payloadGPR() == source)
537             fillAction = Load32Payload;
538         else { // Fill the Tag
539             switch (info.spillFormat()) {
540             case DataFormatInt32:
541                 ASSERT(registerFormat == DataFormatJSInt32);
542                 fillAction = SetInt32Tag;
543                 break;
544             case DataFormatCell:
545                 ASSERT(registerFormat == DataFormatJSCell);
546                 fillAction = SetCellTag;
547                 break;
548             case DataFormatBoolean:
549                 ASSERT(registerFormat == DataFormatJSBoolean);
550                 fillAction = SetBooleanTag;
551                 break;
552             default:
553                 fillAction = Load32Tag;
554                 break;
555             }
556         }
557 #endif
558     }
559         
560     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
561 }
562     
563 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
564 {
565     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
566     Node* node = info.node();
567     ASSERT(info.registerFormat() == DataFormatDouble);
568
569     SilentSpillAction spillAction;
570     SilentFillAction fillAction;
571         
572     if (!info.needsSpill())
573         spillAction = DoNothingForSpill;
574     else {
575         ASSERT(!node->hasConstant());
576         ASSERT(info.spillFormat() == DataFormatNone);
577         ASSERT(info.fpr() == source);
578         spillAction = StoreDouble;
579     }
580         
581 #if USE(JSVALUE64)
582     if (node->hasConstant()) {
583         node->asNumber(); // To get the assertion.
584         fillAction = SetDoubleConstant;
585     } else {
586         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
587         fillAction = LoadDouble;
588     }
589 #elif USE(JSVALUE32_64)
590     ASSERT(info.registerFormat() == DataFormatDouble);
591     if (node->hasConstant()) {
592         node->asNumber(); // To get the assertion.
593         fillAction = SetDoubleConstant;
594     } else
595         fillAction = LoadDouble;
596 #endif
597
598     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
599 }
600     
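// Executes the spill half of a SilentRegisterSavePlan; silentFill() below
// performs the matching restore. The canTrample GPR passed to silentFill may
// be clobbered while materializing double constants on 64-bit.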
601 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
602 {
603     switch (plan.spillAction()) {
604     case DoNothingForSpill:
605         break;
606     case Store32Tag:
607         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
608         break;
609     case Store32Payload:
610         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
611         break;
612     case StorePtr:
613         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
614         break;
615 #if USE(JSVALUE64)
616     case Store64:
617         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
618         break;
619 #endif
620     case StoreDouble:
621         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
622         break;
623     default:
624         RELEASE_ASSERT_NOT_REACHED();
625     }
626 }
627     
628 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
629 {
630 #if USE(JSVALUE32_64)
631     UNUSED_PARAM(canTrample);
632 #endif
633     switch (plan.fillAction()) {
634     case DoNothingForFill:
635         break;
636     case SetInt32Constant:
637         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
638         break;
639 #if USE(JSVALUE64)
640     case SetInt52Constant:
641         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
642         break;
643     case SetStrictInt52Constant:
644         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
645         break;
646 #endif // USE(JSVALUE64)
647     case SetBooleanConstant:
648         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
649         break;
650     case SetCellConstant:
651         ASSERT(plan.node()->constant()->value().isCell());
652         m_jit.move(TrustedImmPtr(plan.node()->constant()), plan.gpr());
653         break;
654 #if USE(JSVALUE64)
655     case SetTrustedJSConstant:
656         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
657         break;
658     case SetJSConstant:
659         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
660         break;
661     case SetDoubleConstant:
662         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
663         m_jit.move64ToDouble(canTrample, plan.fpr());
664         break;
665     case Load32PayloadBoxInt:
666         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
667         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
668         break;
669     case Load32PayloadConvertToInt52:
670         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
671         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
672         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
673         break;
674     case Load32PayloadSignExtend:
675         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
676         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
677         break;
678 #else
679     case SetJSConstantTag:
680         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
681         break;
682     case SetJSConstantPayload:
683         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
684         break;
685     case SetInt32Tag:
686         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
687         break;
688     case SetCellTag:
689         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
690         break;
691     case SetBooleanTag:
692         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
693         break;
694     case SetDoubleConstant:
695         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
696         break;
697 #endif
698     case Load32Tag:
699         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
700         break;
701     case Load32Payload:
702         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
703         break;
704     case LoadPtr:
705         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
706         break;
707 #if USE(JSVALUE64)
708     case Load64:
709         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
710         break;
711     case Load64ShiftInt52Right:
712         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
713         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
714         break;
715     case Load64ShiftInt52Left:
716         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
717         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
718         break;
719 #endif
720     case LoadDouble:
721         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
722         break;
723     default:
724         RELEASE_ASSERT_NOT_REACHED();
725     }
726 }
727     
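// Expects tempGPR to hold the cell's indexing-type byte. Returns a jump (or
// jump list, in the overload below) that is taken when the indexing shape
// (and, for array classes that require it, the IsArray bit) does not match
// what the ArrayMode demands.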
728 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
729 {
730     switch (arrayMode.arrayClass()) {
731     case Array::OriginalArray: {
732         CRASH();
733 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
734         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
735         return result;
736 #endif
737     }
738         
739     case Array::Array:
740         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
741         return m_jit.branch32(
742             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
743         
744     case Array::NonArray:
745     case Array::OriginalNonArray:
746         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
747         return m_jit.branch32(
748             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
749         
750     case Array::PossiblyArray:
751         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
752         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
753     }
754     
755     RELEASE_ASSERT_NOT_REACHED();
756     return JITCompiler::Jump();
757 }
758
759 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
760 {
761     JITCompiler::JumpList result;
762     
763     switch (arrayMode.type()) {
764     case Array::Int32:
765     case Array::Double:
766     case Array::Contiguous:
767     case Array::Undecided:
768         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, arrayMode.shapeMask());
769
770     case Array::ArrayStorage:
771     case Array::SlowPutArrayStorage: {
772         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
773         
774         if (arrayMode.isJSArray()) {
775             if (arrayMode.isSlowPut()) {
776                 result.append(
777                     m_jit.branchTest32(
778                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
779                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
780                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
781                 result.append(
782                     m_jit.branch32(
783                         MacroAssembler::Above, tempGPR,
784                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
785                 break;
786             }
787             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
788             result.append(
789                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
790             break;
791         }
792         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
793         if (arrayMode.isSlowPut()) {
794             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
795             result.append(
796                 m_jit.branch32(
797                     MacroAssembler::Above, tempGPR,
798                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
799             break;
800         }
801         result.append(
802             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
803         break;
804     }
805     default:
806         CRASH();
807         break;
808     }
809     
810     return result;
811 }
812
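// Emits the CheckArray speculation: if the abstract interpreter has not
// already proven the array mode, this checks the indexing shape (for ordinary
// indexing modes) or the cell type (for DirectArguments, ScopedArguments and
// typed arrays) and OSR-exits on mismatch.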
813 void SpeculativeJIT::checkArray(Node* node)
814 {
815     ASSERT(node->arrayMode().isSpecific());
816     ASSERT(!node->arrayMode().doesConversion());
817     
818     SpeculateCellOperand base(this, node->child1());
819     GPRReg baseReg = base.gpr();
820     
821     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
822         noResult(m_currentNode);
823         return;
824     }
825     
826     const ClassInfo* expectedClassInfo = 0;
827     
828     switch (node->arrayMode().type()) {
829     case Array::AnyTypedArray:
830     case Array::String:
831         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
832         break;
833     case Array::Int32:
834     case Array::Double:
835     case Array::Contiguous:
836     case Array::Undecided:
837     case Array::ArrayStorage:
838     case Array::SlowPutArrayStorage: {
839         GPRTemporary temp(this);
840         GPRReg tempGPR = temp.gpr();
841         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
842         speculationCheck(
843             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
844             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
845         
846         noResult(m_currentNode);
847         return;
848     }
849     case Array::DirectArguments:
850         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
851         noResult(m_currentNode);
852         return;
853     case Array::ScopedArguments:
854         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
855         noResult(m_currentNode);
856         return;
857     default:
858         speculateCellTypeWithoutTypeFiltering(
859             node->child1(), baseReg,
860             typeForTypedArrayType(node->arrayMode().typedArrayType()));
861         noResult(m_currentNode);
862         return;
863     }
864     
865     RELEASE_ASSERT(expectedClassInfo);
866     
867     GPRTemporary temp(this);
868     GPRTemporary temp2(this);
869     m_jit.emitLoadStructure(*m_jit.vm(), baseReg, temp.gpr(), temp2.gpr());
870     speculationCheck(
871         BadType, JSValueSource::unboxedCell(baseReg), node,
872         m_jit.branchPtr(
873             MacroAssembler::NotEqual,
874             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
875             TrustedImmPtr(expectedClassInfo)));
876     
877     noResult(m_currentNode);
878 }
879
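// Arrayify / ArrayifyToStructure: checks whether the base already has the
// desired structure or indexing shape and, if not, calls out through
// ArrayifySlowPathGenerator to convert its storage.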
880 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
881 {
882     ASSERT(node->arrayMode().doesConversion());
883     
884     GPRTemporary temp(this);
885     GPRTemporary structure;
886     GPRReg tempGPR = temp.gpr();
887     GPRReg structureGPR = InvalidGPRReg;
888     
889     if (node->op() != ArrayifyToStructure) {
890         GPRTemporary realStructure(this);
891         structure.adopt(realStructure);
892         structureGPR = structure.gpr();
893     }
894         
895     // We can skip all that comes next if we already have array storage.
896     MacroAssembler::JumpList slowPath;
897     
898     if (node->op() == ArrayifyToStructure) {
899         slowPath.append(m_jit.branchWeakStructure(
900             JITCompiler::NotEqual,
901             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
902             node->structure()));
903     } else {
904         m_jit.load8(
905             MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
906         
907         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
908     }
909     
910     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
911         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
912     
913     noResult(m_currentNode);
914 }
915
916 void SpeculativeJIT::arrayify(Node* node)
917 {
918     ASSERT(node->arrayMode().isSpecific());
919     
920     SpeculateCellOperand base(this, node->child1());
921     
922     if (!node->child2()) {
923         arrayify(node, base.gpr(), InvalidGPRReg);
924         return;
925     }
926     
927     SpeculateInt32Operand property(this, node->child2());
928     
929     arrayify(node, base.gpr(), property.gpr());
930 }
931
932 GPRReg SpeculativeJIT::fillStorage(Edge edge)
933 {
934     VirtualRegister virtualRegister = edge->virtualRegister();
935     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
936     
937     switch (info.registerFormat()) {
938     case DataFormatNone: {
939         if (info.spillFormat() == DataFormatStorage) {
940             GPRReg gpr = allocate();
941             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
942             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
943             info.fillStorage(*m_stream, gpr);
944             return gpr;
945         }
946         
947         // Must be a cell; fill it as a cell and then return the pointer.
948         return fillSpeculateCell(edge);
949     }
950         
951     case DataFormatStorage: {
952         GPRReg gpr = info.gpr();
953         m_gprs.lock(gpr);
954         return gpr;
955     }
956         
957     default:
958         return fillSpeculateCell(edge);
959     }
960 }
961
962 void SpeculativeJIT::useChildren(Node* node)
963 {
964     if (node->flags() & NodeHasVarArgs) {
965         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
966             if (!!m_jit.graph().m_varArgChildren[childIdx])
967                 use(m_jit.graph().m_varArgChildren[childIdx]);
968         }
969     } else {
970         Edge child1 = node->child1();
971         if (!child1) {
972             ASSERT(!node->child2() && !node->child3());
973             return;
974         }
975         use(child1);
976         
977         Edge child2 = node->child2();
978         if (!child2) {
979             ASSERT(!node->child3());
980             return;
981         }
982         use(child2);
983         
984         Edge child3 = node->child3();
985         if (!child3)
986             return;
987         use(child3);
988     }
989 }
990
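// Both use kinds go through cachedGetById with AccessType::TryGet; for
// UntypedUse the not-cell branch is passed in so that non-cell bases are
// handled by the IC's slow path.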
991 void SpeculativeJIT::compileTryGetById(Node* node)
992 {
993     switch (node->child1().useKind()) {
994     case CellUse: {
995         SpeculateCellOperand base(this, node->child1());
996         JSValueRegsTemporary result(this, Reuse, base);
997
998         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
999         JSValueRegs resultRegs = result.regs();
1000
1001         base.use();
1002
1003         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);
1004
1005         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1006         break;
1007     }
1008
1009     case UntypedUse: {
1010         JSValueOperand base(this, node->child1());
1011         JSValueRegsTemporary result(this, Reuse, base);
1012
1013         JSValueRegs baseRegs = base.jsValueRegs();
1014         JSValueRegs resultRegs = result.regs();
1015
1016         base.use();
1017
1018         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1019
1020         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);
1021
1022         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1023         break;
1024     }
1025
1026     default:
1027         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1028         break;
1029     } 
1030 }
1031
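// For `key in base`: when the key is a constant JSString backed by an atomic
// StringImpl, emits a patchable stub-info-based fast path with
// operationInOptimize as its slow path; otherwise falls back to a plain call
// to operationGenericIn.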
1032 void SpeculativeJIT::compileIn(Node* node)
1033 {
1034     SpeculateCellOperand base(this, node->child1());
1035     GPRReg baseGPR = base.gpr();
1036     
1037     if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(*m_jit.vm())) {
1038         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1039             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1040             
1041             GPRTemporary result(this);
1042             GPRReg resultGPR = result.gpr();
1043
1044             use(node->child2());
1045             
1046             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1047             MacroAssembler::Label done = m_jit.label();
1048             
1049             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1050             // we can cast it to const AtomicStringImpl* safely.
1051             auto slowPath = slowPathCall(
1052                 jump.m_jump, this, operationInOptimize,
1053                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1054                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1055             
1056             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1057             stubInfo->codeOrigin = node->origin.semantic;
1058             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1059             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1060             stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
1061 #if USE(JSVALUE32_64)
1062             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1063             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1064             stubInfo->patch.thisTagGPR = static_cast<int8_t>(InvalidGPRReg);
1065 #endif
1066             stubInfo->patch.usedRegisters = usedRegisters();
1067
1068             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1069             addSlowPathGenerator(WTFMove(slowPath));
1070
1071             base.use();
1072
1073             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1074             return;
1075         }
1076     }
1077
1078     JSValueOperand key(this, node->child2());
1079     JSValueRegs regs = key.jsValueRegs();
1080         
1081     GPRFlushedCallResult result(this);
1082     GPRReg resultGPR = result.gpr();
1083         
1084     base.use();
1085     key.use();
1086         
1087     flushRegisters();
1088     callOperation(
1089         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1090         baseGPR, regs);
1091     m_jit.exceptionCheck();
1092     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1093 }
1094
1095 void SpeculativeJIT::compileDeleteById(Node* node)
1096 {
1097     JSValueOperand value(this, node->child1());
1098     GPRFlushedCallResult result(this);
1099
1100     JSValueRegs valueRegs = value.jsValueRegs();
1101     GPRReg resultGPR = result.gpr();
1102
1103     value.use();
1104
1105     flushRegisters();
1106     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1107     m_jit.exceptionCheck();
1108
1109     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1110 }
1111
1112 void SpeculativeJIT::compileDeleteByVal(Node* node)
1113 {
1114     JSValueOperand base(this, node->child1());
1115     JSValueOperand key(this, node->child2());
1116     GPRFlushedCallResult result(this);
1117
1118     JSValueRegs baseRegs = base.jsValueRegs();
1119     JSValueRegs keyRegs = key.jsValueRegs();
1120     GPRReg resultGPR = result.gpr();
1121
1122     base.use();
1123     key.use();
1124
1125     flushRegisters();
1126     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1127     m_jit.exceptionCheck();
1128
1129     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1130 }
1131
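// If the compare's only use is the Branch that immediately follows it in the
// block, fuse the two into a compare-and-branch peephole and return true so
// the branch node is skipped; otherwise emit a boolean-producing compare and
// return false. nonSpeculativeStrictEq below follows the same pattern.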
1132 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1133 {
1134     unsigned branchIndexInBlock = detectPeepHoleBranch();
1135     if (branchIndexInBlock != UINT_MAX) {
1136         Node* branchNode = m_block->at(branchIndexInBlock);
1137
1138         ASSERT(node->adjustedRefCount() == 1);
1139         
1140         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1141     
1142         m_indexInBlock = branchIndexInBlock;
1143         m_currentNode = branchNode;
1144         
1145         return true;
1146     }
1147     
1148     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1149     
1150     return false;
1151 }
1152
1153 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1154 {
1155     unsigned branchIndexInBlock = detectPeepHoleBranch();
1156     if (branchIndexInBlock != UINT_MAX) {
1157         Node* branchNode = m_block->at(branchIndexInBlock);
1158
1159         ASSERT(node->adjustedRefCount() == 1);
1160         
1161         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1162     
1163         m_indexInBlock = branchIndexInBlock;
1164         m_currentNode = branchNode;
1165         
1166         return true;
1167     }
1168     
1169     nonSpeculativeNonPeepholeStrictEq(node, invert);
1170     
1171     return false;
1172 }
1173
1174 static const char* dataFormatString(DataFormat format)
1175 {
1176     // These values correspond to the DataFormat enum.
1177     const char* strings[] = {
1178         "[  ]",
1179         "[ i]",
1180         "[ d]",
1181         "[ c]",
1182         "Err!",
1183         "Err!",
1184         "Err!",
1185         "Err!",
1186         "[J ]",
1187         "[Ji]",
1188         "[Jd]",
1189         "[Jc]",
1190         "Err!",
1191         "Err!",
1192         "Err!",
1193         "Err!",
1194     };
1195     return strings[format];
1196 }
1197
1198 void SpeculativeJIT::dump(const char* label)
1199 {
1200     if (label)
1201         dataLogF("<%s>\n", label);
1202
1203     dataLogF("  gprs:\n");
1204     m_gprs.dump();
1205     dataLogF("  fprs:\n");
1206     m_fprs.dump();
1207     dataLogF("  VirtualRegisters:\n");
1208     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1209         GenerationInfo& info = m_generationInfo[i];
1210         if (info.alive())
1211             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1212         else
1213             dataLogF("    % 3d:[__][__]", i);
1214         if (info.registerFormat() == DataFormatDouble)
1215             dataLogF(":fpr%d\n", info.fpr());
1216         else if (info.registerFormat() != DataFormatNone
1217 #if USE(JSVALUE32_64)
1218             && !(info.registerFormat() & DataFormatJS)
1219 #endif
1220             ) {
1221             ASSERT(info.gpr() != InvalidGPRReg);
1222             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1223         } else
1224             dataLogF("\n");
1225     }
1226     if (label)
1227         dataLogF("</%s>\n", label);
1228 }
1229
1230 GPRTemporary::GPRTemporary()
1231     : m_jit(0)
1232     , m_gpr(InvalidGPRReg)
1233 {
1234 }
1235
1236 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1237     : m_jit(jit)
1238     , m_gpr(InvalidGPRReg)
1239 {
1240     m_gpr = m_jit->allocate();
1241 }
1242
1243 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1244     : m_jit(jit)
1245     , m_gpr(InvalidGPRReg)
1246 {
1247     m_gpr = m_jit->allocate(specific);
1248 }
1249
1250 #if USE(JSVALUE32_64)
1251 GPRTemporary::GPRTemporary(
1252     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1253     : m_jit(jit)
1254     , m_gpr(InvalidGPRReg)
1255 {
1256     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1257         m_gpr = m_jit->reuse(op1.gpr(which));
1258     else
1259         m_gpr = m_jit->allocate();
1260 }
1261 #endif // USE(JSVALUE32_64)
1262
1263 JSValueRegsTemporary::JSValueRegsTemporary() { }
1264
1265 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1266 #if USE(JSVALUE64)
1267     : m_gpr(jit)
1268 #else
1269     : m_payloadGPR(jit)
1270     , m_tagGPR(jit)
1271 #endif
1272 {
1273 }
1274
1275 #if USE(JSVALUE64)
1276 template<typename T>
1277 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1278     : m_gpr(jit, Reuse, operand)
1279 {
1280 }
1281 #else
1282 template<typename T>
1283 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1284 {
1285     if (resultWord == PayloadWord) {
1286         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1287         m_tagGPR = GPRTemporary(jit);
1288     } else {
1289         m_payloadGPR = GPRTemporary(jit);
1290         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1291     }
1292 }
1293 #endif
1294
1295 #if USE(JSVALUE64)
1296 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1297 {
1298     m_gpr = GPRTemporary(jit, Reuse, operand);
1299 }
1300 #else
1301 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1302 {
1303     if (jit->canReuse(operand.node())) {
1304         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1305         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1306     } else {
1307         m_payloadGPR = GPRTemporary(jit);
1308         m_tagGPR = GPRTemporary(jit);
1309     }
1310 }
1311 #endif
1312
1313 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1314
1315 JSValueRegs JSValueRegsTemporary::regs()
1316 {
1317 #if USE(JSVALUE64)
1318     return JSValueRegs(m_gpr.gpr());
1319 #else
1320     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1321 #endif
1322 }
1323
1324 void GPRTemporary::adopt(GPRTemporary& other)
1325 {
1326     ASSERT(!m_jit);
1327     ASSERT(m_gpr == InvalidGPRReg);
1328     ASSERT(other.m_jit);
1329     ASSERT(other.m_gpr != InvalidGPRReg);
1330     m_jit = other.m_jit;
1331     m_gpr = other.m_gpr;
1332     other.m_jit = 0;
1333     other.m_gpr = InvalidGPRReg;
1334 }
1335
1336 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1337 {
1338     ASSERT(other.m_jit);
1339     ASSERT(other.m_fpr != InvalidFPRReg);
1340     m_jit = other.m_jit;
1341     m_fpr = other.m_fpr;
1342
1343     other.m_jit = nullptr;
1344 }
1345
1346 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1347     : m_jit(jit)
1348     , m_fpr(InvalidFPRReg)
1349 {
1350     m_fpr = m_jit->fprAllocate();
1351 }
1352
1353 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1354     : m_jit(jit)
1355     , m_fpr(InvalidFPRReg)
1356 {
1357     if (m_jit->canReuse(op1.node()))
1358         m_fpr = m_jit->reuse(op1.fpr());
1359     else
1360         m_fpr = m_jit->fprAllocate();
1361 }
1362
1363 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1364     : m_jit(jit)
1365     , m_fpr(InvalidFPRReg)
1366 {
1367     if (m_jit->canReuse(op1.node()))
1368         m_fpr = m_jit->reuse(op1.fpr());
1369     else if (m_jit->canReuse(op2.node()))
1370         m_fpr = m_jit->reuse(op2.fpr());
1371     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1372         m_fpr = m_jit->reuse(op1.fpr());
1373     else
1374         m_fpr = m_jit->fprAllocate();
1375 }
1376
1377 #if USE(JSVALUE32_64)
1378 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1379     : m_jit(jit)
1380     , m_fpr(InvalidFPRReg)
1381 {
1382     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1383         m_fpr = m_jit->reuse(op1.fpr());
1384     else
1385         m_fpr = m_jit->fprAllocate();
1386 }
1387 #endif
1388
1389 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1390 {
1391     BasicBlock* taken = branchNode->branchData()->taken.block;
1392     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1393
1394     if (taken == nextBlock()) {
1395         condition = MacroAssembler::invert(condition);
1396         std::swap(taken, notTaken);
1397     }
1398
1399     SpeculateDoubleOperand op1(this, node->child1());
1400     SpeculateDoubleOperand op2(this, node->child2());
1401     
1402     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1403     jump(notTaken);
1404 }
1405
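// Peephole-fused Branch(CompareEq(Object, Object)): speculates that both
// operands are objects and, if the masquerades-as-undefined watchpoint is no
// longer valid, that neither masquerades as undefined, then branches on
// pointer equality.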
1406 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1407 {
1408     BasicBlock* taken = branchNode->branchData()->taken.block;
1409     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1410
1411     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1412     
1413     if (taken == nextBlock()) {
1414         condition = MacroAssembler::NotEqual;
1415         BasicBlock* tmp = taken;
1416         taken = notTaken;
1417         notTaken = tmp;
1418     }
1419
1420     SpeculateCellOperand op1(this, node->child1());
1421     SpeculateCellOperand op2(this, node->child2());
1422     
1423     GPRReg op1GPR = op1.gpr();
1424     GPRReg op2GPR = op2.gpr();
1425     
1426     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1427         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1428             speculationCheck(
1429                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1430         }
1431         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1432             speculationCheck(
1433                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1434         }
1435     } else {
1436         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1437             speculationCheck(
1438                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1439                 m_jit.branchIfNotObject(op1GPR));
1440         }
1441         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1442             m_jit.branchTest8(
1443                 MacroAssembler::NonZero, 
1444                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1445                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1446
1447         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1448             speculationCheck(
1449                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1450                 m_jit.branchIfNotObject(op2GPR));
1451         }
1452         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1453             m_jit.branchTest8(
1454                 MacroAssembler::NonZero, 
1455                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1456                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1457     }
1458
1459     branchPtr(condition, op1GPR, op2GPR, taken);
1460     jump(notTaken);
1461 }
1462
1463 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1464 {
1465     BasicBlock* taken = branchNode->branchData()->taken.block;
1466     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1467
1468     // The branch instruction will branch to the taken block.
1469     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1470     if (taken == nextBlock()) {
1471         condition = JITCompiler::invert(condition);
1472         BasicBlock* tmp = taken;
1473         taken = notTaken;
1474         notTaken = tmp;
1475     }
1476
1477     if (node->child1()->isInt32Constant()) {
1478         int32_t imm = node->child1()->asInt32();
1479         SpeculateBooleanOperand op2(this, node->child2());
1480         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1481     } else if (node->child2()->isInt32Constant()) {
1482         SpeculateBooleanOperand op1(this, node->child1());
1483         int32_t imm = node->child2()->asInt32();
1484         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1485     } else {
1486         SpeculateBooleanOperand op1(this, node->child1());
1487         SpeculateBooleanOperand op2(this, node->child2());
1488         branch32(condition, op1.gpr(), op2.gpr(), taken);
1489     }
1490
1491     jump(notTaken);
1492 }
1493
1494 void SpeculativeJIT::compileToLowerCase(Node* node)
1495 {
1496     ASSERT(node->op() == ToLowerCase);
1497     SpeculateCellOperand string(this, node->child1());
1498     GPRTemporary temp(this);
1499     GPRTemporary index(this);
1500     GPRTemporary charReg(this);
1501     GPRTemporary length(this);
1502
1503     GPRReg stringGPR = string.gpr();
1504     GPRReg tempGPR = temp.gpr();
1505     GPRReg indexGPR = index.gpr();
1506     GPRReg charGPR = charReg.gpr();
1507     GPRReg lengthGPR = length.gpr();
1508
1509     speculateString(node->child1(), stringGPR);
1510
1511     CCallHelpers::JumpList slowPath;
1512
1513     m_jit.move(TrustedImmPtr(0), indexGPR);
1514
1515     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1516     slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
1517
1518     slowPath.append(m_jit.branchTest32(
1519         MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1520         MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1521     m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1522     m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1523
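    // Fast path: walk the 8-bit characters in place. Any non-ASCII character, or any character in
    // 'A'..'Z', bails to the slow path, which calls operationToLowerCase starting at the current
    // index. If the loop runs off the end, the string is already lowercase and is returned as-is.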
1524     auto loopStart = m_jit.label();
1525     auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1526     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1527     slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1528     m_jit.sub32(TrustedImm32('A'), charGPR);
1529     slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1530
1531     m_jit.add32(TrustedImm32(1), indexGPR);
1532     m_jit.jump().linkTo(loopStart, &m_jit);
1533     
1534     slowPath.link(&m_jit);
1535     silentSpillAllRegisters(lengthGPR);
1536     callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1537     silentFillAllRegisters(lengthGPR);
1538     m_jit.exceptionCheck();
1539     auto done = m_jit.jump();
1540
1541     loopDone.link(&m_jit);
1542     m_jit.move(stringGPR, lengthGPR);
1543
1544     done.link(&m_jit);
1545     cellResult(lengthGPR, node);
1546 }
1547
1548 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1549 {
1550     BasicBlock* taken = branchNode->branchData()->taken.block;
1551     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1552
1553     // The branch instruction will branch to the taken block.
1554     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1555     if (taken == nextBlock()) {
1556         condition = JITCompiler::invert(condition);
1557         BasicBlock* tmp = taken;
1558         taken = notTaken;
1559         notTaken = tmp;
1560     }
1561
1562     if (node->child1()->isInt32Constant()) {
1563         int32_t imm = node->child1()->asInt32();
1564         SpeculateInt32Operand op2(this, node->child2());
1565         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1566     } else if (node->child2()->isInt32Constant()) {
1567         SpeculateInt32Operand op1(this, node->child1());
1568         int32_t imm = node->child2()->asInt32();
1569         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1570     } else {
1571         SpeculateInt32Operand op1(this, node->child1());
1572         SpeculateInt32Operand op2(this, node->child2());
1573         branch32(condition, op1.gpr(), op2.gpr(), taken);
1574     }
1575
1576     jump(notTaken);
1577 }
1578
1579 // Returns true if the compare is fused with a subsequent branch.
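// Informal sketch of the pattern being fused: a compare whose only use is the immediately
// following branch, e.g.
//     @1: CompareLess(Int32:@a, Int32:@b)
//     @2: Branch(@1)
// is emitted as a single conditional jump to the taken block rather than materializing a
// boolean result for @1.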
1580 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1581 {
1582     // Fused compare & branch.
1583     unsigned branchIndexInBlock = detectPeepHoleBranch();
1584     if (branchIndexInBlock != UINT_MAX) {
1585         Node* branchNode = m_block->at(branchIndexInBlock);
1586
1587         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1588         // so there can be no intervening nodes that also reference the compare.
1589         ASSERT(node->adjustedRefCount() == 1);
1590
1591         if (node->isBinaryUseKind(Int32Use))
1592             compilePeepHoleInt32Branch(node, branchNode, condition);
1593 #if USE(JSVALUE64)
1594         else if (node->isBinaryUseKind(Int52RepUse))
1595             compilePeepHoleInt52Branch(node, branchNode, condition);
1596 #endif // USE(JSVALUE64)
1597         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1598             // Use non-peephole comparison, for now.
1599             return false;
1600         } else if (node->isBinaryUseKind(DoubleRepUse))
1601             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1602         else if (node->op() == CompareEq) {
1603             if (node->isBinaryUseKind(BooleanUse))
1604                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1605             else if (node->isBinaryUseKind(SymbolUse))
1606                 compilePeepHoleSymbolEquality(node, branchNode);
1607             else if (node->isBinaryUseKind(ObjectUse))
1608                 compilePeepHoleObjectEquality(node, branchNode);
1609             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1610                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1611             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1612                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1613             else if (!needsTypeCheck(node->child1(), SpecOther))
1614                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1615             else if (!needsTypeCheck(node->child2(), SpecOther))
1616                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1617             else {
1618                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1619                 return true;
1620             }
1621         } else {
1622             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1623             return true;
1624         }
1625
1626         use(node->child1());
1627         use(node->child2());
1628         m_indexInBlock = branchIndexInBlock;
1629         m_currentNode = branchNode;
1630         return true;
1631     }
1632     return false;
1633 }
1634
1635 void SpeculativeJIT::noticeOSRBirth(Node* node)
1636 {
1637     if (!node->hasVirtualRegister())
1638         return;
1639     
1640     VirtualRegister virtualRegister = node->virtualRegister();
1641     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1642     
1643     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1644 }
1645
1646 void SpeculativeJIT::compileMovHint(Node* node)
1647 {
1648     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1649     
1650     Node* child = node->child1().node();
1651     noticeOSRBirth(child);
1652     
1653     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1654 }
1655
1656 void SpeculativeJIT::bail(AbortReason reason)
1657 {
1658     if (verboseCompilationEnabled())
1659         dataLog("Bailing compilation.\n");
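    // Presumably m_compileOkay is reset so that compileCurrentBlock()'s ASSERT still holds for any
    // remaining blocks; the abortWithReason() trap emitted below makes this path fatal at run time.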
1660     m_compileOkay = true;
1661     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1662     clearGenerationInfo();
1663 }
1664
1665 void SpeculativeJIT::compileCurrentBlock()
1666 {
1667     ASSERT(m_compileOkay);
1668     
1669     if (!m_block)
1670         return;
1671     
1672     ASSERT(m_block->isReachable);
1673     
1674     m_jit.blockHeads()[m_block->index] = m_jit.label();
1675
1676     if (!m_block->intersectionOfCFAHasVisited) {
1677         // Don't generate code for basic blocks that are unreachable according to CFA.
1678         // But to be sure that nobody has generated a jump to this block, drop in a
1679         // breakpoint here.
1680         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1681         return;
1682     }
1683
1684     m_stream->appendAndLog(VariableEvent::reset());
1685     
1686     m_jit.jitAssertHasValidCallFrame();
1687     m_jit.jitAssertTagsInPlace();
1688     m_jit.jitAssertArgumentCountSane();
1689
1690     m_state.reset();
1691     m_state.beginBasicBlock(m_block);
1692     
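    // Record, in the variable event stream, the flush format of every variable live at the head of
    // the block, so OSR exit value recovery knows where and how each local is stored on entry.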
1693     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1694         int operand = m_block->variablesAtHead.operandForIndex(i);
1695         Node* node = m_block->variablesAtHead[i];
1696         if (!node)
1697             continue; // No need to record dead SetLocals.
1698         
1699         VariableAccessData* variable = node->variableAccessData();
1700         DataFormat format;
1701         if (!node->refCount())
1702             continue; // No need to record dead SetLocals.
1703         format = dataFormatFor(variable->flushFormat());
1704         m_stream->appendAndLog(
1705             VariableEvent::setLocal(
1706                 VirtualRegister(operand),
1707                 variable->machineLocal(),
1708                 format));
1709     }
1710
1711     m_origin = NodeOrigin();
1712     
1713     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1714         m_currentNode = m_block->at(m_indexInBlock);
1715         
1716         // We may have hit a contradiction that the CFA was aware of but that the JIT
1717         // didn't cause directly.
1718         if (!m_state.isValid()) {
1719             bail(DFGBailedAtTopOfBlock);
1720             return;
1721         }
1722
1723         m_interpreter.startExecuting();
1724         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1725         m_jit.setForNode(m_currentNode);
1726         m_origin = m_currentNode->origin;
1727         if (validationEnabled())
1728             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1729         m_lastGeneratedNode = m_currentNode->op();
1730         
1731         ASSERT(m_currentNode->shouldGenerate());
1732         
1733         if (verboseCompilationEnabled()) {
1734             dataLogF(
1735                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1736                 (int)m_currentNode->index(),
1737                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1738             dataLog("\n");
1739         }
1740
1741         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1742             m_jit.jitReleaseAssertNoException(*m_jit.vm());
1743
1744         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1745
1746         compile(m_currentNode);
1747         
1748         if (belongsInMinifiedGraph(m_currentNode->op()))
1749             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1750         
1751 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1752         m_jit.clearRegisterAllocationOffsets();
1753 #endif
1754         
1755         if (!m_compileOkay) {
1756             bail(DFGBailedAtEndOfNode);
1757             return;
1758         }
1759         
1760         // Make sure that the abstract state is rematerialized for the next node.
1761         m_interpreter.executeEffects(m_indexInBlock);
1762     }
1763     
1764     // Perform the most basic verification that children have been used correctly.
1765     if (!ASSERT_DISABLED) {
1766         for (auto& info : m_generationInfo)
1767             RELEASE_ASSERT(!info.alive());
1768     }
1769 }
1770
1771 // If we are making type predictions about our arguments then
1772 // we need to check that they are correct on function entry.
1773 void SpeculativeJIT::checkArgumentTypes()
1774 {
1775     ASSERT(!m_currentNode);
1776     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1777
1778     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1779         Node* node = m_jit.graph().m_arguments[i];
1780         if (!node) {
1781             // The argument is dead. We don't do any checks for such arguments.
1782             continue;
1783         }
1784         
1785         ASSERT(node->op() == SetArgument);
1786         ASSERT(node->shouldGenerate());
1787
1788         VariableAccessData* variableAccessData = node->variableAccessData();
1789         FlushFormat format = variableAccessData->flushFormat();
1790         
1791         if (format == FlushedJSValue)
1792             continue;
1793         
1794         VirtualRegister virtualRegister = variableAccessData->local();
1795
1796         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1797         
1798 #if USE(JSVALUE64)
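        // A sketch of the 64-bit value-encoding checks below: a boxed int32 compares >= the
        // TagTypeNumber register, a boolean XORed with ValueFalse leaves at most bit 0 set, and a
        // cell has none of the TagMask bits set.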
1799         switch (format) {
1800         case FlushedInt32: {
1801             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1802             break;
1803         }
1804         case FlushedBoolean: {
1805             GPRTemporary temp(this);
1806             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1807             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1808             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1809             break;
1810         }
1811         case FlushedCell: {
1812             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1813             break;
1814         }
1815         default:
1816             RELEASE_ASSERT_NOT_REACHED();
1817             break;
1818         }
1819 #else
1820         switch (format) {
1821         case FlushedInt32: {
1822             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1823             break;
1824         }
1825         case FlushedBoolean: {
1826             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1827             break;
1828         }
1829         case FlushedCell: {
1830             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1831             break;
1832         }
1833         default:
1834             RELEASE_ASSERT_NOT_REACHED();
1835             break;
1836         }
1837 #endif
1838     }
1839
1840     m_origin = NodeOrigin();
1841 }
1842
1843 bool SpeculativeJIT::compile()
1844 {
1845     checkArgumentTypes();
1846     
1847     ASSERT(!m_currentNode);
1848     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1849         m_jit.setForBlockIndex(blockIndex);
1850         m_block = m_jit.graph().block(blockIndex);
1851         compileCurrentBlock();
1852     }
1853     linkBranches();
1854     return true;
1855 }
1856
1857 void SpeculativeJIT::createOSREntries()
1858 {
1859     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1860         BasicBlock* block = m_jit.graph().block(blockIndex);
1861         if (!block)
1862             continue;
1863         if (!block->isOSRTarget)
1864             continue;
1865         
1866         // Currently we don't have OSR entry trampolines. We could add them
1867         // here if need be.
1868         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1869     }
1870 }
1871
1872 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1873 {
1874     unsigned osrEntryIndex = 0;
1875     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1876         BasicBlock* block = m_jit.graph().block(blockIndex);
1877         if (!block)
1878             continue;
1879         if (!block->isOSRTarget)
1880             continue;
1881         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1882     }
1883     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1884     
1885     if (verboseCompilationEnabled()) {
1886         DumpContext dumpContext;
1887         dataLog("OSR Entries:\n");
1888         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1889             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1890         if (!dumpContext.isEmpty())
1891             dumpContext.dump(WTF::dataFile());
1892     }
1893 }
1894     
1895 void SpeculativeJIT::compileCheckTraps(Node*)
1896 {
1897     ASSERT(Options::usePollingTraps());
1898     GPRTemporary unused(this);
1899     GPRReg unusedGPR = unused.gpr();
1900
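    // Poll the VM's needTrapHandling flag; if it is set, call operationHandleTraps on the slow path.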
1901     JITCompiler::Jump needTrapHandling = m_jit.branchTest8(JITCompiler::NonZero,
1902         JITCompiler::AbsoluteAddress(m_jit.vm()->needTrapHandlingAddress()));
1903
1904     addSlowPathGenerator(slowPathCall(needTrapHandling, this, operationHandleTraps, unusedGPR));
1905 }
1906
1907 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1908 {
1909     Edge child3 = m_jit.graph().varArgChild(node, 2);
1910     Edge child4 = m_jit.graph().varArgChild(node, 3);
1911
1912     ArrayMode arrayMode = node->arrayMode();
1913     
1914     GPRReg baseReg = base.gpr();
1915     GPRReg propertyReg = property.gpr();
1916     
1917     SpeculateDoubleOperand value(this, child3);
1918
1919     FPRReg valueReg = value.fpr();
1920     
1921     DFG_TYPE_CHECK(
1922         JSValueRegs(), child3, SpecFullRealNumber,
1923         m_jit.branchDouble(
1924             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1925     
1926     if (!m_compileOkay)
1927         return;
1928     
1929     StorageOperand storage(this, child4);
1930     GPRReg storageReg = storage.gpr();
1931
1932     if (node->op() == PutByValAlias) {
1933         // Store the value to the array.
1934         GPRReg propertyReg = property.gpr();
1935         FPRReg valueReg = value.fpr();
1936         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1937         
1938         noResult(m_currentNode);
1939         return;
1940     }
1941     
1942     GPRTemporary temporary;
1943     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1944
1945     MacroAssembler::Jump slowCase;
1946     
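    // In-bounds mode: a store at or beyond publicLength is simply a speculation failure. Otherwise,
    // stores between publicLength and vectorLength grow publicLength in place, and stores beyond
    // vectorLength either fail speculation or (in out-of-bounds mode) take the slow path call below.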
1947     if (arrayMode.isInBounds()) {
1948         speculationCheck(
1949             OutOfBounds, JSValueRegs(), 0,
1950             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1951     } else {
1952         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1953         
1954         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1955         
1956         if (!arrayMode.isOutOfBounds())
1957             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1958         
1959         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1960         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1961         
1962         inBounds.link(&m_jit);
1963     }
1964     
1965     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1966
1967     base.use();
1968     property.use();
1969     value.use();
1970     storage.use();
1971     
1972     if (arrayMode.isOutOfBounds()) {
1973         addSlowPathGenerator(
1974             slowPathCall(
1975                 slowCase, this,
1976                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1977                 NoResult, baseReg, propertyReg, valueReg));
1978     }
1979
1980     noResult(m_currentNode, UseChildrenCalledExplicitly);
1981 }
1982
1983 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1984 {
1985     SpeculateCellOperand string(this, node->child1());
1986     SpeculateStrictInt32Operand index(this, node->child2());
1987     StorageOperand storage(this, node->child3());
1988
1989     GPRReg stringReg = string.gpr();
1990     GPRReg indexReg = index.gpr();
1991     GPRReg storageReg = storage.gpr();
1992     
1993     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1994
1995     // unsigned comparison so we can filter out negative indices and indices that are too large
1996     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1997
1998     GPRTemporary scratch(this);
1999     GPRReg scratchReg = scratch.gpr();
2000
2001     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2002
2003     // Load the character into scratchReg
2004     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2005
2006     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2007     JITCompiler::Jump cont8Bit = m_jit.jump();
2008
2009     is16Bit.link(&m_jit);
2010
2011     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2012
2013     cont8Bit.link(&m_jit);
2014
2015     int32Result(scratchReg, m_currentNode);
2016 }
2017
2018 void SpeculativeJIT::compileGetByValOnString(Node* node)
2019 {
2020     SpeculateCellOperand base(this, node->child1());
2021     SpeculateStrictInt32Operand property(this, node->child2());
2022     StorageOperand storage(this, node->child3());
2023     GPRReg baseReg = base.gpr();
2024     GPRReg propertyReg = property.gpr();
2025     GPRReg storageReg = storage.gpr();
2026
2027     GPRTemporary scratch(this);
2028     GPRReg scratchReg = scratch.gpr();
2029 #if USE(JSVALUE32_64)
2030     GPRTemporary resultTag;
2031     GPRReg resultTagReg = InvalidGPRReg;
2032     if (node->arrayMode().isOutOfBounds()) {
2033         GPRTemporary realResultTag(this);
2034         resultTag.adopt(realResultTag);
2035         resultTagReg = resultTag.gpr();
2036     }
2037 #endif
2038
2039     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2040
2041     // unsigned comparison so we can filter out negative indices and indices that are too large
2042     JITCompiler::Jump outOfBounds = m_jit.branch32(
2043         MacroAssembler::AboveOrEqual, propertyReg,
2044         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2045     if (node->arrayMode().isInBounds())
2046         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2047
2048     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2049
2050     // Load the character into scratchReg
2051     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2052
2053     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2054     JITCompiler::Jump cont8Bit = m_jit.jump();
2055
2056     is16Bit.link(&m_jit);
2057
2058     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2059
2060     JITCompiler::Jump bigCharacter =
2061         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2062
2063     // 8 bit string values don't need the isASCII check.
2064     cont8Bit.link(&m_jit);
2065
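    // The character code is below 0x100 here, so index into the VM's single-character string table:
    // scale by the pointer size (shift by 2 on 32-bit, 3 on 64-bit) and load the interned JSString*.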
2066     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2067     m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2068     m_jit.loadPtr(scratchReg, scratchReg);
2069
2070     addSlowPathGenerator(
2071         slowPathCall(
2072             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2073
2074     if (node->arrayMode().isOutOfBounds()) {
2075 #if USE(JSVALUE32_64)
2076         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2077 #endif
2078
2079         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2080         bool prototypeChainIsSane = false;
2081         if (globalObject->stringPrototypeChainIsSane()) {
2082             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2083             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2084             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2085             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2086             // indexed properties either.
2087             // https://bugs.webkit.org/show_bug.cgi?id=144668
2088             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2089             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2090             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2091         }
2092         if (prototypeChainIsSane) {
2093             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2094             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2095             
2096 #if USE(JSVALUE64)
2097             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2098                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2099 #else
2100             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2101                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2102                 baseReg, propertyReg));
2103 #endif
2104         } else {
2105 #if USE(JSVALUE64)
2106             addSlowPathGenerator(
2107                 slowPathCall(
2108                     outOfBounds, this, operationGetByValStringInt,
2109                     scratchReg, baseReg, propertyReg));
2110 #else
2111             addSlowPathGenerator(
2112                 slowPathCall(
2113                     outOfBounds, this, operationGetByValStringInt,
2114                     JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2115 #endif
2116         }
2117         
2118 #if USE(JSVALUE64)
2119         jsValueResult(scratchReg, m_currentNode);
2120 #else
2121         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2122 #endif
2123     } else
2124         cellResult(scratchReg, m_currentNode);
2125 }
2126
2127 void SpeculativeJIT::compileFromCharCode(Node* node)
2128 {
2129     Edge& child = node->child1();
2130     if (child.useKind() == UntypedUse) {
2131         JSValueOperand opr(this, child);
2132         JSValueRegs oprRegs = opr.jsValueRegs();
2133 #if USE(JSVALUE64)
2134         GPRTemporary result(this);
2135         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2136 #else
2137         GPRTemporary resultTag(this);
2138         GPRTemporary resultPayload(this);
2139         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2140 #endif
2141         flushRegisters();
2142         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2143         m_jit.exceptionCheck();
2144         
2145         jsValueResult(resultRegs, node);
2146         return;
2147     }
2148
2149     SpeculateStrictInt32Operand property(this, child);
2150     GPRReg propertyReg = property.gpr();
2151     GPRTemporary smallStrings(this);
2152     GPRTemporary scratch(this);
2153     GPRReg scratchReg = scratch.gpr();
2154     GPRReg smallStringsReg = smallStrings.gpr();
2155
2156     JITCompiler::JumpList slowCases;
2157     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2158     m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2159     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2160
2161     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2162     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2163     cellResult(scratchReg, m_currentNode);
2164 }
2165
2166 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2167 {
2168     VirtualRegister virtualRegister = node->virtualRegister();
2169     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2170
2171     switch (info.registerFormat()) {
2172     case DataFormatStorage:
2173         RELEASE_ASSERT_NOT_REACHED();
2174
2175     case DataFormatBoolean:
2176     case DataFormatCell:
2177         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2178         return GeneratedOperandTypeUnknown;
2179
2180     case DataFormatNone:
2181     case DataFormatJSCell:
2182     case DataFormatJS:
2183     case DataFormatJSBoolean:
2184     case DataFormatJSDouble:
2185         return GeneratedOperandJSValue;
2186
2187     case DataFormatJSInt32:
2188     case DataFormatInt32:
2189         return GeneratedOperandInteger;
2190
2191     default:
2192         RELEASE_ASSERT_NOT_REACHED();
2193         return GeneratedOperandTypeUnknown;
2194     }
2195 }
2196
2197 void SpeculativeJIT::compileValueToInt32(Node* node)
2198 {
2199     switch (node->child1().useKind()) {
2200 #if USE(JSVALUE64)
2201     case Int52RepUse: {
2202         SpeculateStrictInt52Operand op1(this, node->child1());
2203         GPRTemporary result(this, Reuse, op1);
2204         GPRReg op1GPR = op1.gpr();
2205         GPRReg resultGPR = result.gpr();
2206         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2207         int32Result(resultGPR, node, DataFormatInt32);
2208         return;
2209     }
2210 #endif // USE(JSVALUE64)
2211         
2212     case DoubleRepUse: {
2213         GPRTemporary result(this);
2214         SpeculateDoubleOperand op1(this, node->child1());
2215         FPRReg fpr = op1.fpr();
2216         GPRReg gpr = result.gpr();
2217         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2218         
2219         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this,
2220             hasSensibleDoubleToInt() ? operationToInt32SensibleSlow : operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2221         
2222         int32Result(gpr, node);
2223         return;
2224     }
2225     
2226     case NumberUse:
2227     case NotCellUse: {
2228         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2229         case GeneratedOperandInteger: {
2230             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2231             GPRTemporary result(this, Reuse, op1);
2232             m_jit.move(op1.gpr(), result.gpr());
2233             int32Result(result.gpr(), node, op1.format());
2234             return;
2235         }
2236         case GeneratedOperandJSValue: {
2237             GPRTemporary result(this);
2238 #if USE(JSVALUE64)
2239             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2240
2241             GPRReg gpr = op1.gpr();
2242             GPRReg resultGpr = result.gpr();
2243             FPRTemporary tempFpr(this);
2244             FPRReg fpr = tempFpr.fpr();
2245
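            // Under the 64-bit encoding, a boxed int32 has both TagTypeNumber bits set, so any
            // value that compares >= the TagTypeNumber register is an int32.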
2246             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2247             JITCompiler::JumpList converted;
2248
2249             if (node->child1().useKind() == NumberUse) {
2250                 DFG_TYPE_CHECK(
2251                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2252                     m_jit.branchTest64(
2253                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2254             } else {
2255                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2256                 
2257                 DFG_TYPE_CHECK(
2258                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2259                 
2260                 // It's not a cell, so true turns into 1 and all else turns into 0.
2261                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2262                 converted.append(m_jit.jump());
2263                 
2264                 isNumber.link(&m_jit);
2265             }
2266
2267             // First, if we get here we have a double encoded as a JSValue
2268             unboxDouble(gpr, resultGpr, fpr);
2269
2270             silentSpillAllRegisters(resultGpr);
2271             callOperation(operationToInt32, resultGpr, fpr);
2272             silentFillAllRegisters(resultGpr);
2273
2274             converted.append(m_jit.jump());
2275
2276             isInteger.link(&m_jit);
2277             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2278
2279             converted.link(&m_jit);
2280 #else
2281             Node* childNode = node->child1().node();
2282             VirtualRegister virtualRegister = childNode->virtualRegister();
2283             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2284
2285             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2286
2287             GPRReg payloadGPR = op1.payloadGPR();
2288             GPRReg resultGpr = result.gpr();
2289         
2290             JITCompiler::JumpList converted;
2291
2292             if (info.registerFormat() == DataFormatJSInt32)
2293                 m_jit.move(payloadGPR, resultGpr);
2294             else {
2295                 GPRReg tagGPR = op1.tagGPR();
2296                 FPRTemporary tempFpr(this);
2297                 FPRReg fpr = tempFpr.fpr();
2298                 FPRTemporary scratch(this);
2299
2300                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2301
2302                 if (node->child1().useKind() == NumberUse) {
2303                     DFG_TYPE_CHECK(
2304                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2305                         m_jit.branch32(
2306                             MacroAssembler::AboveOrEqual, tagGPR,
2307                             TrustedImm32(JSValue::LowestTag)));
2308                 } else {
2309                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2310                     
2311                     DFG_TYPE_CHECK(
2312                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2313                         m_jit.branchIfCell(op1.jsValueRegs()));
2314                     
2315                     // It's not a cell, so true turns into 1 and all else turns into 0.
2316                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2317                     m_jit.move(TrustedImm32(0), resultGpr);
2318                     converted.append(m_jit.jump());
2319                     
2320                     isBoolean.link(&m_jit);
2321                     m_jit.move(payloadGPR, resultGpr);
2322                     converted.append(m_jit.jump());
2323                     
2324                     isNumber.link(&m_jit);
2325                 }
2326
2327                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2328
2329                 silentSpillAllRegisters(resultGpr);
2330                 callOperation(operationToInt32, resultGpr, fpr);
2331                 silentFillAllRegisters(resultGpr);
2332
2333                 converted.append(m_jit.jump());
2334
2335                 isInteger.link(&m_jit);
2336                 m_jit.move(payloadGPR, resultGpr);
2337
2338                 converted.link(&m_jit);
2339             }
2340 #endif
2341             int32Result(resultGpr, node);
2342             return;
2343         }
2344         case GeneratedOperandTypeUnknown:
2345             RELEASE_ASSERT(!m_compileOkay);
2346             return;
2347         }
2348         RELEASE_ASSERT_NOT_REACHED();
2349         return;
2350     }
2351     
2352     default:
2353         ASSERT(!m_compileOkay);
2354         return;
2355     }
2356 }
2357
2358 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2359 {
2360     if (doesOverflow(node->arithMode())) {
2361         if (enableInt52()) {
2362             SpeculateInt32Operand op1(this, node->child1());
2363             GPRTemporary result(this, Reuse, op1);
2364             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2365             strictInt52Result(result.gpr(), node);
2366             return;
2367         }
2368         SpeculateInt32Operand op1(this, node->child1());
2369         FPRTemporary result(this);
2370             
2371         GPRReg inputGPR = op1.gpr();
2372         FPRReg outputFPR = result.fpr();
2373             
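        // Convert as a signed int32, then add 2^32 when the sign bit was set so the double
        // reflects the unsigned value.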
2374         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2375             
2376         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2377         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2378         positive.link(&m_jit);
2379             
2380         doubleResult(outputFPR, node);
2381         return;
2382     }
2383     
2384     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2385
2386     SpeculateInt32Operand op1(this, node->child1());
2387     GPRTemporary result(this);
2388
2389     m_jit.move(op1.gpr(), result.gpr());
2390
2391     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2392
2393     int32Result(result.gpr(), node, op1.format());
2394 }
2395
2396 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2397 {
2398     SpeculateDoubleOperand op1(this, node->child1());
2399     FPRTemporary scratch(this);
2400     GPRTemporary result(this);
2401     
2402     FPRReg valueFPR = op1.fpr();
2403     FPRReg scratchFPR = scratch.fpr();
2404     GPRReg resultGPR = result.gpr();
2405
2406     JITCompiler::JumpList failureCases;
2407     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2408     m_jit.branchConvertDoubleToInt32(
2409         valueFPR, resultGPR, failureCases, scratchFPR,
2410         shouldCheckNegativeZero(node->arithMode()));
2411     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2412
2413     int32Result(resultGPR, node);
2414 }
2415
2416 void SpeculativeJIT::compileDoubleRep(Node* node)
2417 {
2418     switch (node->child1().useKind()) {
2419     case RealNumberUse: {
2420         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2421         FPRTemporary result(this);
2422         
2423         JSValueRegs op1Regs = op1.jsValueRegs();
2424         FPRReg resultFPR = result.fpr();
2425         
2426 #if USE(JSVALUE64)
2427         GPRTemporary temp(this);
2428         GPRReg tempGPR = temp.gpr();
2429         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2430 #else
2431         FPRTemporary temp(this);
2432         FPRReg tempFPR = temp.fpr();
2433         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2434 #endif
2435         
2436         JITCompiler::Jump done = m_jit.branchDouble(
2437             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2438         
2439         DFG_TYPE_CHECK(
2440             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2441         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2442         
2443         done.link(&m_jit);
2444         
2445         doubleResult(resultFPR, node);
2446         return;
2447     }
2448     
2449     case NotCellUse:
2450     case NumberUse: {
2451         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2452
2453         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2454         if (isInt32Speculation(possibleTypes)) {
2455             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2456             FPRTemporary result(this);
2457             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2458             doubleResult(result.fpr(), node);
2459             return;
2460         }
2461
2462         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2463         FPRTemporary result(this);
2464
2465 #if USE(JSVALUE64)
2466         GPRTemporary temp(this);
2467
2468         GPRReg op1GPR = op1.gpr();
2469         GPRReg tempGPR = temp.gpr();
2470         FPRReg resultFPR = result.fpr();
2471         JITCompiler::JumpList done;
2472
2473         JITCompiler::Jump isInteger = m_jit.branch64(
2474             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2475
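        // For NotCellUse, perform ToNumber on the non-cell primitives inline:
        // null -> +0, undefined -> NaN, false -> 0, true -> 1.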
2476         if (node->child1().useKind() == NotCellUse) {
2477             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2478             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2479
2480             static const double zero = 0;
2481             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2482
2483             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2484             done.append(isNull);
2485
2486             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2487                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2488
2489             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2490             static const double one = 1;
2491             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2492             done.append(m_jit.jump());
2493             done.append(isFalse);
2494
2495             isUndefined.link(&m_jit);
2496             static const double NaN = PNaN;
2497             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2498             done.append(m_jit.jump());
2499
2500             isNumber.link(&m_jit);
2501         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2502             typeCheck(
2503                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2504                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2505         }
2506
2507         unboxDouble(op1GPR, tempGPR, resultFPR);
2508         done.append(m_jit.jump());
2509     
2510         isInteger.link(&m_jit);
2511         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2512         done.link(&m_jit);
2513 #else // USE(JSVALUE64) -> this is the 32_64 case
2514         FPRTemporary temp(this);
2515     
2516         GPRReg op1TagGPR = op1.tagGPR();
2517         GPRReg op1PayloadGPR = op1.payloadGPR();
2518         FPRReg tempFPR = temp.fpr();
2519         FPRReg resultFPR = result.fpr();
2520         JITCompiler::JumpList done;
2521     
2522         JITCompiler::Jump isInteger = m_jit.branch32(
2523             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2524
2525         if (node->child1().useKind() == NotCellUse) {
2526             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2527             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2528
2529             static const double zero = 0;
2530             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2531
2532             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2533             done.append(isNull);
2534
2535             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2536
2537             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2538             static const double one = 1;
2539             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2540             done.append(m_jit.jump());
2541             done.append(isFalse);
2542
2543             isUndefined.link(&m_jit);
2544             static const double NaN = PNaN;
2545             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2546             done.append(m_jit.jump());
2547
2548             isNumber.link(&m_jit);
2549         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2550             typeCheck(
2551                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2552                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2553         }
2554
2555         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2556         done.append(m_jit.jump());
2557     
2558         isInteger.link(&m_jit);
2559         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2560         done.link(&m_jit);
2561 #endif // USE(JSVALUE64)
2562     
2563         doubleResult(resultFPR, node);
2564         return;
2565     }
2566         
2567 #if USE(JSVALUE64)
2568     case Int52RepUse: {
2569         SpeculateStrictInt52Operand value(this, node->child1());
2570         FPRTemporary result(this);
2571         
2572         GPRReg valueGPR = value.gpr();
2573         FPRReg resultFPR = result.fpr();
2574
2575         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2576         
2577         doubleResult(resultFPR, node);
2578         return;
2579     }
2580 #endif // USE(JSVALUE64)
2581         
2582     default:
2583         RELEASE_ASSERT_NOT_REACHED();
2584         return;
2585     }
2586 }
2587
2588 void SpeculativeJIT::compileValueRep(Node* node)
2589 {
2590     switch (node->child1().useKind()) {
2591     case DoubleRepUse: {
2592         SpeculateDoubleOperand value(this, node->child1());
2593         JSValueRegsTemporary result(this);
2594         
2595         FPRReg valueFPR = value.fpr();
2596         JSValueRegs resultRegs = result.regs();
2597         
2598         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2599         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2600         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2601         // local was purified.
2602         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2603             m_jit.purifyNaN(valueFPR);
2604
2605         boxDouble(valueFPR, resultRegs);
2606         
2607         jsValueResult(resultRegs, node);
2608         return;
2609     }
2610         
2611 #if USE(JSVALUE64)
2612     case Int52RepUse: {
2613         SpeculateStrictInt52Operand value(this, node->child1());
2614         GPRTemporary result(this);
2615         
2616         GPRReg valueGPR = value.gpr();
2617         GPRReg resultGPR = result.gpr();
2618         
2619         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2620         
2621         jsValueResult(resultGPR, node);
2622         return;
2623     }
2624 #endif // USE(JSVALUE64)
2625         
2626     default:
2627         RELEASE_ASSERT_NOT_REACHED();
2628         return;
2629     }
2630 }
2631
2632 static double clampDoubleToByte(double d)
2633 {
2634     d += 0.5;
2635     if (!(d > 0))
2636         d = 0;
2637     else if (d > 255)
2638         d = 255;
2639     return d;
2640 }
2641
2642 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2643 {
2644     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2645     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2646     jit.xorPtr(result, result);
2647     MacroAssembler::Jump clamped = jit.jump();
2648     tooBig.link(&jit);
2649     jit.move(JITCompiler::TrustedImm32(255), result);
2650     clamped.link(&jit);
2651     inBounds.link(&jit);
2652 }
2653
2654 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2655 {
2656     // Unordered compare so we pick up NaN
2657     static const double zero = 0;
2658     static const double byteMax = 255;
2659     static const double half = 0.5;
2660     jit.loadDouble(JITCompiler::TrustedImmPtr(&zero), scratch);
2661     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2662     jit.loadDouble(JITCompiler::TrustedImmPtr(&byteMax), scratch);
2663     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2664     
2665     jit.loadDouble(JITCompiler::TrustedImmPtr(&half), scratch);
2666     // FIXME: This should probably just use a floating point round!
2667     // https://bugs.webkit.org/show_bug.cgi?id=72054
2668     jit.addDouble(source, scratch);
2669     jit.truncateDoubleToInt32(scratch, result);   
2670     MacroAssembler::Jump truncatedInt = jit.jump();
2671     
2672     tooSmall.link(&jit);
2673     jit.xorPtr(result, result);
2674     MacroAssembler::Jump zeroed = jit.jump();
2675     
2676     tooBig.link(&jit);
2677     jit.move(JITCompiler::TrustedImm32(255), result);
2678     
2679     truncatedInt.link(&jit);
2680     zeroed.link(&jit);
2681
2682 }
2683
2684 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2685 {
2686     if (node->op() == PutByValAlias)
2687         return JITCompiler::Jump();
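    // If the view and its length are statically known, a constant in-bounds index needs no check at
    // all and other indices can be compared against an immediate; otherwise load the length from
    // the view and compare against that.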
2688     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2689         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2690     if (view) {
2691         uint32_t length = view->length();
2692         Node* indexNode = m_jit.graph().child(node, 1).node();
2693         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2694             return JITCompiler::Jump();
2695         return m_jit.branch32(
2696             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2697     }
2698     return m_jit.branch32(
2699         MacroAssembler::AboveOrEqual, indexGPR,
2700         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2701 }
2702
2703 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2704 {
2705     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2706     if (!jump.isSet())
2707         return;
2708     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2709 }
2710
2711 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2712 {
2713     JITCompiler::Jump done;
2714     if (outOfBounds.isSet()) {
2715         done = m_jit.jump();
2716         if (node->arrayMode().isInBounds())
2717             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2718         else {
2719             outOfBounds.link(&m_jit);
2720
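            // Out-of-bounds is tolerated here, but a wasteful-mode view whose vector pointer is
            // null indicates a neutered (detached) buffer, which we treat as a speculation failure.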
2721             JITCompiler::Jump notWasteful = m_jit.branch32(
2722                 MacroAssembler::NotEqual,
2723                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2724                 TrustedImm32(WastefulTypedArray));
2725
2726             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2727                 MacroAssembler::Zero,
2728                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2729             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2730             notWasteful.link(&m_jit);
2731         }
2732     }
2733     return done;
2734 }
2735
2736 void SpeculativeJIT::loadFromIntTypedArray(GPRReg storageReg, GPRReg propertyReg, GPRReg resultReg, TypedArrayType type)
2737 {
2738     switch (elementSize(type)) {
2739     case 1:
2740         if (isSigned(type))
2741             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2742         else
2743             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2744         break;
2745     case 2:
2746         if (isSigned(type))
2747             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2748         else
2749             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2750         break;
2751     case 4:
2752         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2753         break;
2754     default:
2755         CRASH();
2756     }
2757 }
2758
2759 void SpeculativeJIT::setIntTypedArrayLoadResult(Node* node, GPRReg resultReg, TypedArrayType type, bool canSpeculate)
2760 {
2761     if (elementSize(type) < 4 || isSigned(type)) {
2762         int32Result(resultReg, node);
2763         return;
2764     }
2765     
2766     ASSERT(elementSize(type) == 4 && !isSigned(type));
2767     if (node->shouldSpeculateInt32() && canSpeculate) {
2768         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2769         int32Result(resultReg, node);
2770         return;
2771     }
2772     
2773 #if USE(JSVALUE64)
2774     if (node->shouldSpeculateAnyInt()) {
2775         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2776         strictInt52Result(resultReg, node);
2777         return;
2778     }
2779 #endif
2780     
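    // The uint32 value may not fit in an int32, so produce a double, adding 2^32 when the sign bit
    // of the 32-bit value was set.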
2781     FPRTemporary fresult(this);
2782     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2783     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2784     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2785     positive.link(&m_jit);
2786     doubleResult(fresult.fpr(), node);
2787 }
2788
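// GetByVal on an Int8/Uint8/Int16/Uint16/Int32/Uint32 array: bounds-check the index against the
// view's length, load the element, then produce the result via setIntTypedArrayLoadResult.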
2789 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2790 {
2791     ASSERT(isInt(type));
2792     
2793     SpeculateCellOperand base(this, node->child1());
2794     SpeculateStrictInt32Operand property(this, node->child2());
2795     StorageOperand storage(this, node->child3());
2796
2797     GPRReg baseReg = base.gpr();
2798     GPRReg propertyReg = property.gpr();
2799     GPRReg storageReg = storage.gpr();
2800
2801     GPRTemporary result(this);
2802     GPRReg resultReg = result.gpr();
2803
2804     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2805
2806     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2807     loadFromIntTypedArray(storageReg, propertyReg, resultReg, type);
2808     bool canSpeculate = true;
2809     setIntTypedArrayLoadResult(node, resultReg, type, canSpeculate);
2810 }
2811
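// Materializes the value operand of an integer typed array store into a GPR. Constants that
// already match the expected type are folded directly (clamped for Uint8Clamped arrays);
// otherwise the value is converted according to its use kind. Doubles that do not truncate
// cleanly to int32 are boxed and routed to slowPathCases (allocating the tag temporaries on
// 32-bit platforms). Returns false if speculative execution was terminated because the constant
// could never be a number.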
2812 bool SpeculativeJIT::getIntTypedArrayStoreOperand(
2813     GPRTemporary& value,
2814     GPRReg property,
2815 #if USE(JSVALUE32_64)
2816     GPRTemporary& propertyTag,
2817     GPRTemporary& valueTag,
2818 #endif
2819     Edge valueUse, JITCompiler::JumpList& slowPathCases, bool isClamped)
2820 {
2821     bool isAppropriateConstant = false;
2822     if (valueUse->isConstant()) {
2823         JSValue jsValue = valueUse->asJSValue();
2824         SpeculatedType expectedType = typeFilterFor(valueUse.useKind());
2825         SpeculatedType actualType = speculationFromValue(jsValue);
2826         isAppropriateConstant = (expectedType | actualType) == expectedType;
2827     }
2828     
2829     if (isAppropriateConstant) {
2830         JSValue jsValue = valueUse->asJSValue();
2831         if (!jsValue.isNumber()) {
2832             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2833             return false;
2834         }
2835         double d = jsValue.asNumber();
2836         if (isClamped)
2837             d = clampDoubleToByte(d);
2838         GPRTemporary scratch(this);
2839         GPRReg scratchReg = scratch.gpr();
2840         m_jit.move(Imm32(toInt32(d)), scratchReg);
2841         value.adopt(scratch);
2842     } else {
2843         switch (valueUse.useKind()) {
2844         case Int32Use: {
2845             SpeculateInt32Operand valueOp(this, valueUse);
2846             GPRTemporary scratch(this);
2847             GPRReg scratchReg = scratch.gpr();
2848             m_jit.move(valueOp.gpr(), scratchReg);
2849             if (isClamped)
2850                 compileClampIntegerToByte(m_jit, scratchReg);
2851             value.adopt(scratch);
2852             break;
2853         }
2854             
2855 #if USE(JSVALUE64)
2856         case Int52RepUse: {
2857             SpeculateStrictInt52Operand valueOp(this, valueUse);
2858             GPRTemporary scratch(this);
2859             GPRReg scratchReg = scratch.gpr();
2860             m_jit.move(valueOp.gpr(), scratchReg);
2861             if (isClamped) {
2862                 MacroAssembler::Jump inBounds = m_jit.branch64(
2863                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2864                 MacroAssembler::Jump tooBig = m_jit.branch64(
2865                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2866                 m_jit.move(TrustedImm32(0), scratchReg);
2867                 MacroAssembler::Jump clamped = m_jit.jump();
2868                 tooBig.link(&m_jit);
2869                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2870                 clamped.link(&m_jit);
2871                 inBounds.link(&m_jit);
2872             }
2873             value.adopt(scratch);
2874             break;
2875         }
2876 #endif // USE(JSVALUE64)
2877             
2878         case DoubleRepUse: {
2879             RELEASE_ASSERT(!isAtomicsIntrinsic(m_currentNode->op()));
2880             if (isClamped) {
2881                 SpeculateDoubleOperand valueOp(this, valueUse);
2882                 GPRTemporary result(this);
2883                 FPRTemporary floatScratch(this);
2884                 FPRReg fpr = valueOp.fpr();
2885                 GPRReg gpr = result.gpr();
2886                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2887                 value.adopt(result);
2888             } else {
2889 #if USE(JSVALUE32_64)
2890                 GPRTemporary realPropertyTag(this);
2891                 propertyTag.adopt(realPropertyTag);
2892                 GPRReg propertyTagGPR = propertyTag.gpr();
2893
2894                 GPRTemporary realValueTag(this);
2895                 valueTag.adopt(realValueTag);
2896                 GPRReg valueTagGPR = valueTag.gpr();
2897 #endif
2898                 SpeculateDoubleOperand valueOp(this, valueUse);
2899                 GPRTemporary result(this);
2900                 FPRReg fpr = valueOp.fpr();
2901                 GPRReg gpr = result.gpr();
2902                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2903                 m_jit.xorPtr(gpr, gpr);
2904                 MacroAssembler::JumpList fixed(m_jit.jump());
2905                 notNaN.link(&m_jit);
2906
2907                 fixed.append(m_jit.branchTruncateDoubleToInt32(
2908                     fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
2909
2910 #if USE(JSVALUE64)
2911                 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
2912                 boxDouble(fpr, gpr);
2913 #else
2914                 UNUSED_PARAM(property);
2915                 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
2916                 boxDouble(fpr, valueTagGPR, gpr);
2917 #endif
2918                 slowPathCases.append(m_jit.jump());
2919
2920                 fixed.link(&m_jit);
2921                 value.adopt(result);
2922             }
2923             break;
2924         }
2925             
2926         default:
2927             RELEASE_ASSERT_NOT_REACHED();
2928             break;
2929         }
2930     }
2931     return true;
2932 }
2933
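// PutByVal on an integer typed array: obtain the value via getIntTypedArrayStoreOperand, perform
// a width-appropriate store when the index is in bounds, tolerate out-of-bounds stores (subject
// to the neutering check), and fall back to the generic put operations for values that required
// the slow path.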
2934 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2935 {
2936     ASSERT(isInt(type));
2937     
2938     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2939     GPRReg storageReg = storage.gpr();
2940     
2941     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2942     
2943     GPRTemporary value;
2944 #if USE(JSVALUE32_64)
2945     GPRTemporary propertyTag;
2946     GPRTemporary valueTag;
2947 #endif
2948
2949     JITCompiler::JumpList slowPathCases;
2950     
2951     bool result = getIntTypedArrayStoreOperand(
2952         value, property,
2953 #if USE(JSVALUE32_64)
2954         propertyTag, valueTag,
2955 #endif
2956         valueUse, slowPathCases, isClamped(type));
2957     if (!result) {
2958         noResult(node);
2959         return;
2960     }
2961
2962     GPRReg valueGPR = value.gpr();
2963 #if USE(JSVALUE32_64)
2964     GPRReg propertyTagGPR = propertyTag.gpr();
2965     GPRReg valueTagGPR = valueTag.gpr();
2966 #endif
2967
2968     ASSERT_UNUSED(valueGPR, valueGPR != property);
2969     ASSERT(valueGPR != base);
2970     ASSERT(valueGPR != storageReg);
2971     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2972
2973     switch (elementSize(type)) {
2974     case 1:
2975         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2976         break;
2977     case 2:
2978         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2979         break;
2980     case 4:
2981         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2982         break;
2983     default:
2984         CRASH();
2985     }
2986
2987     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2988     if (done.isSet())
2989         done.link(&m_jit);
2990
2991     if (!slowPathCases.empty()) {
2992 #if USE(JSVALUE64)
2993         if (node->op() == PutByValDirect) {
2994             addSlowPathGenerator(slowPathCall(
2995                 slowPathCases, this,
2996                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
2997                 NoResult, base, property, valueGPR));
2998         } else {
2999             addSlowPathGenerator(slowPathCall(
3000                 slowPathCases, this,
3001                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
3002                 NoResult, base, property, valueGPR));
3003         }
3004 #else // not USE(JSVALUE64)
3005         if (node->op() == PutByValDirect) {
3006             addSlowPathGenerator(slowPathCall(
3007                 slowPathCases, this,
3008                 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
3009                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3010         } else {
3011             addSlowPathGenerator(slowPathCall(
3012                 slowPathCases, this,
3013                 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
3014                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3015         }
3016 #endif
3017     }
3018     
3019     noResult(node);
3020 }
3021
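// GetByVal on a Float32/Float64 array: bounds-check the index, then load the element as a
// double, converting from float for 4-byte elements.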
3022 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3023 {
3024     ASSERT(isFloat(type));
3025     
3026     SpeculateCellOperand base(this, node->child1());
3027     SpeculateStrictInt32Operand property(this, node->child2());
3028     StorageOperand storage(this, node->child3());
3029
3030     GPRReg baseReg = base.gpr();
3031     GPRReg propertyReg = property.gpr();
3032     GPRReg storageReg = storage.gpr();
3033
3034     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
3035
3036     FPRTemporary result(this);
3037     FPRReg resultReg = result.fpr();
3038     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3039     switch (elementSize(type)) {
3040     case 4:
3041         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3042         m_jit.convertFloatToDouble(resultReg, resultReg);
3043         break;
3044     case 8: {
3045         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3046         break;
3047     }
3048     default:
3049         RELEASE_ASSERT_NOT_REACHED();
3050     }
3051     
3052     doubleResult(resultReg, node);
3053 }
3054
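// PutByVal on a Float32/Float64 array: store the double operand (narrowed to float for 4-byte
// elements) when the index is in bounds; out-of-bounds stores are dropped after the neutering
// check.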
3055 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3056 {
3057     ASSERT(isFloat(type));
3058     
3059     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3060     GPRReg storageReg = storage.gpr();
3061     
3062     Edge baseUse = m_jit.graph().varArgChild(node, 0);
3063     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3064
3065     SpeculateDoubleOperand valueOp(this, valueUse);
3066     FPRTemporary scratch(this);
3067     FPRReg valueFPR = valueOp.fpr();
3068     FPRReg scratchFPR = scratch.fpr();
3069
3070     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3071     
3072     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3073     
3074     switch (elementSize(type)) {
3075     case 4: {
3076         m_jit.moveDouble(valueFPR, scratchFPR);
3077         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3078         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3079         break;
3080     }
3081     case 8:
3082         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3083         break;
3084     default:
3085         RELEASE_ASSERT_NOT_REACHED();
3086     }
3087
3088     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3089     if (done.isSet())
3090         done.link(&m_jit);
3091     noResult(node);
3092 }
3093
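// Emits the prototype-chain walk for instanceof once the value is known to be a cell, leaving a
// boolean in scratchReg. Proxy objects encountered during the walk bail out to
// operationDefaultHasInstance.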
3094 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
3095 {
3096     // Check that prototype is an object.
3097     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3098     
3099     // Initialize scratchReg with the value being checked.
3100     m_jit.move(valueReg, scratchReg);
3101     
3102     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3103     MacroAssembler::Label loop(&m_jit);
3104     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3105         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3106     m_jit.emitLoadStructure(*m_jit.vm(), scratchReg, scratchReg, scratch2Reg);
3107     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
3108     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3109 #if USE(JSVALUE64)
3110     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3111 #else
3112     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3113 #endif
3114     
3115     // No match - result is false.
3116 #if USE(JSVALUE64)
3117     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
3118 #else
3119     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
3120 #endif
3121     MacroAssembler::JumpList doneJumps; 
3122     doneJumps.append(m_jit.jump());
3123
3124     performDefaultHasInstance.link(&m_jit);
3125     silentSpillAllRegisters(scratchReg);
3126     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
3127     silentFillAllRegisters(scratchReg);
3128     m_jit.exceptionCheck();
3129 #if USE(JSVALUE64)
3130     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
3131 #endif
3132     doneJumps.append(m_jit.jump());
3133     
3134     isInstance.link(&m_jit);
3135 #if USE(JSVALUE64)
3136     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
3137 #else
3138     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
3139 #endif
3140     
3141     doneJumps.link(&m_jit);
3142 }
3143
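// OSR-exits when the base cell's type-info flags contain none of the bits in
// node->typeInfoOperand().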
3144 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
3145 {
3146     SpeculateCellOperand base(this, node->child1());
3147
3148     GPRReg baseGPR = base.gpr();
3149
3150     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
3151
3152     noResult(node);
3153 }
3154
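// parseInt intrinsic: selects one of four slow-path operations depending on whether a radix
// argument is present and whether the input is already known to be a string. The result is
// always produced by a call, so registers are flushed first.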
3155 void SpeculativeJIT::compileParseInt(Node* node)
3156 {
3157     RELEASE_ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == StringUse);
3158
3159     GPRFlushedCallResult resultPayload(this);
3160     GPRReg resultPayloadGPR = resultPayload.gpr();
3161 #if USE(JSVALUE64)
3162     JSValueRegs resultRegs(resultPayloadGPR);
3163 #else
3164     GPRFlushedCallResult2 resultTag(this);
3165     GPRReg resultTagGPR = resultTag.gpr();
3166     JSValueRegs resultRegs(resultTagGPR, resultPayloadGPR);
3167 #endif
3168
3169     if (node->child2()) {
3170         SpeculateInt32Operand radix(this, node->child2());
3171         GPRReg radixGPR = radix.gpr();
3172         if (node->child1().useKind() == UntypedUse) {
3173             JSValueOperand value(this, node->child1());
3174 #if USE(JSVALUE64)
3175             auto result = resultRegs.gpr();
3176             auto valueReg = value.gpr();
3177 #else
3178             auto result = resultRegs;
3179             auto valueReg = value.jsValueRegs();
3180 #endif
3181
3182             flushRegisters();
3183             callOperation(operationParseIntGeneric, result, valueReg, radixGPR);
3184             m_jit.exceptionCheck();
3185         } else {
3186             SpeculateCellOperand value(this, node->child1());
3187             GPRReg valueGPR = value.gpr();
3188             speculateString(node->child1(), valueGPR);
3189
3190 #if USE(JSVALUE64)
3191             auto result = resultRegs.gpr();
3192 #else
3193             auto result = resultRegs;
3194 #endif
3195
3196             flushRegisters();
3197             callOperation(operationParseIntString, result, valueGPR, radixGPR);
3198             m_jit.exceptionCheck();
3199         }
3200     } else {
3201         if (node->child1().useKind() == UntypedUse) {
3202             JSValueOperand value(this, node->child1());
3203 #if USE(JSVALUE64)
3204             auto result = resultRegs.gpr();
3205 #else
3206             auto result = resultRegs;
3207 #endif
3208             JSValueRegs valueRegs = value.jsValueRegs();
3209
3210             flushRegisters();
3211             callOperation(operationParseIntNoRadixGeneric, result, valueRegs);
3212             m_jit.exceptionCheck();
3213         } else {
3214             SpeculateCellOperand value(this, node->child1());
3215             GPRReg valueGPR = value.gpr();
3216             speculateString(node->child1(), valueGPR);
3217
3218             flushRegisters();
3219             callOperation(operationParseIntStringNoRadix, resultRegs, valueGPR);
3220             m_jit.exceptionCheck();
3221         }
3222     }
3223
3224     jsValueResult(resultRegs, node);
3225 }
3226
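// instanceof with a prototype that is speculated to be a cell. For UntypedUse the non-cell case
// is handled inline (the result is simply false); cells go through compileInstanceOfForObject.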
3227 void SpeculativeJIT::compileInstanceOf(Node* node)
3228 {
3229     if (node->child1().useKind() == UntypedUse) {
3230         // It might not be a cell. Speculate less aggressively.
3231         // Or: it might only be used once (i.e. by us), so we get zero benefit
3232         // from speculating any more aggressively than we absolutely need to.
3233         
3234         JSValueOperand value(this, node->child1());
3235         SpeculateCellOperand prototype(this, node->child2());
3236         GPRTemporary scratch(this);
3237         GPRTemporary scratch2(this);
3238         
3239         GPRReg prototypeReg = prototype.gpr();
3240         GPRReg scratchReg = scratch.gpr();
3241         GPRReg scratch2Reg = scratch2.gpr();
3242         
3243         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
3244         GPRReg valueReg = value.jsValueRegs().payloadGPR();
3245         moveFalseTo(scratchReg);
3246
3247         MacroAssembler::Jump done = m_jit.jump();
3248         
3249         isCell.link(&m_jit);
3250         
3251         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3252         
3253         done.link(&m_jit);
3254
3255         blessedBooleanResult(scratchReg, node);
3256         return;
3257     }
3258     
3259     SpeculateCellOperand value(this, node->child1());
3260     SpeculateCellOperand prototype(this, node->child2());
3261     
3262     GPRTemporary scratch(this);
3263     GPRTemporary scratch2(this);
3264     
3265     GPRReg valueReg = value.gpr();
3266     GPRReg prototypeReg = prototype.gpr();
3267     GPRReg scratchReg = scratch.gpr();
3268     GPRReg scratch2Reg = scratch2.gpr();
3269     
3270     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3271
3272     blessedBooleanResult(scratchReg, node);
3273 }
3274
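// Shared path for bitwise ops on untyped operands. If either operand is known not to be a
// number, we call straight into the slow-path operation; otherwise we emit the snippet
// generator's fast path and call the slow path only when it bails.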
3275 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3276 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3277 {
3278     Edge& leftChild = node->child1();
3279     Edge& rightChild = node->child2();
3280
3281     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3282         JSValueOperand left(this, leftChild);
3283         JSValueOperand right(this, rightChild);
3284         JSValueRegs leftRegs = left.jsValueRegs();
3285         JSValueRegs rightRegs = right.jsValueRegs();
3286 #if USE(JSVALUE64)
3287         GPRTemporary result(this);
3288         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3289 #else
3290         GPRTemporary resultTag(this);
3291         GPRTemporary resultPayload(this);
3292         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3293 #endif
3294         flushRegisters();
3295         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3296         m_jit.exceptionCheck();
3297
3298         jsValueResult(resultRegs, node);
3299         return;
3300     }
3301
3302     std::optional<JSValueOperand> left;
3303     std::optional<JSValueOperand> right;
3304
3305     JSValueRegs leftRegs;
3306     JSValueRegs rightRegs;
3307
3308 #if USE(JSVALUE64)
3309     GPRTemporary result(this);
3310     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3311     GPRTemporary scratch(this);
3312     GPRReg scratchGPR = scratch.gpr();
3313 #else
3314     GPRTemporary resultTag(this);
3315     GPRTemporary resultPayload(this);
3316     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3317     GPRReg scratchGPR = resultTag.gpr();
3318 #endif
3319
3320     SnippetOperand leftOperand;
3321     SnippetOperand rightOperand;
3322
3323     // The snippet generator does not support both operands being constant. If the left
3324     // operand is already const, we'll ignore the right operand's constness.
3325     if (leftChild->isInt32Constant())
3326         leftOperand.setConstInt32(leftChild->asInt32());
3327     else if (rightChild->isInt32Constant())
3328         rightOperand.setConstInt32(rightChild->asInt32());
3329
3330     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3331
3332     if (!leftOperand.isConst()) {
3333         left.emplace(this, leftChild);
3334         leftRegs = left->jsValueRegs();
3335     }
3336     if (!rightOperand.isConst()) {
3337         right.emplace(this, rightChild);
3338         rightRegs = right->jsValueRegs();
3339     }
3340
3341     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3342     gen.generateFastPath(m_jit);
3343
3344     ASSERT(gen.didEmitFastPath());
3345     gen.endJumpList().append(m_jit.jump());
3346
3347     gen.slowPathJumpList().link(&m_jit);
3348     silentSpillAllRegisters(resultRegs);