Strings and Vectors shouldn't do index masking
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGCallCreateDirectArgumentsWithKnownLengthSlowPathGenerator.h"
37 #include "DFGCapabilities.h"
38 #include "DFGMayExit.h"
39 #include "DFGOSRExitFuzz.h"
40 #include "DFGSaneStringGetByValSlowPathGenerator.h"
41 #include "DFGSlowPathGenerator.h"
42 #include "DFGSnippetParams.h"
43 #include "DirectArguments.h"
44 #include "JITAddGenerator.h"
45 #include "JITBitAndGenerator.h"
46 #include "JITBitOrGenerator.h"
47 #include "JITBitXorGenerator.h"
48 #include "JITDivGenerator.h"
49 #include "JITLeftShiftGenerator.h"
50 #include "JITMulGenerator.h"
51 #include "JITRightShiftGenerator.h"
52 #include "JITSubGenerator.h"
53 #include "JSAsyncFunction.h"
54 #include "JSAsyncGeneratorFunction.h"
55 #include "JSCInlines.h"
56 #include "JSFixedArray.h"
57 #include "JSGeneratorFunction.h"
58 #include "JSLexicalEnvironment.h"
59 #include "JSPropertyNameEnumerator.h"
60 #include "LinkBuffer.h"
61 #include "RegExpConstructor.h"
62 #include "ScopedArguments.h"
63 #include "ScratchRegisterAllocator.h"
64 #include "SuperSampler.h"
65 #include "WeakMapImpl.h"
66 #include <wtf/BitVector.h>
67 #include <wtf/Box.h>
68 #include <wtf/MathExtras.h>
69
70 namespace JSC { namespace DFG {
71
72 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
73     : m_compileOkay(true)
74     , m_jit(jit)
75     , m_graph(m_jit.graph())
76     , m_currentNode(0)
77     , m_lastGeneratedNode(LastNodeType)
78     , m_indexInBlock(0)
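    // Index masking is one of the Spectre mitigations: indexed accesses AND the incoming
    // index with a mask derived from the butterfly's vector length (see
    // WTF::computeIndexingMask()), so a mispredicted bounds check cannot read far out of
    // bounds. It is only enabled when the Spectre mitigations option is on.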
79     , m_indexMaskingMode(Options::enableSpectreMitigations() ? IndexMaskingEnabled : IndexMaskingDisabled)
80     , m_generationInfo(m_jit.graph().frameRegisterCount())
81     , m_state(m_jit.graph())
82     , m_interpreter(m_jit.graph(), m_state)
83     , m_stream(&jit.jitCode()->variableEventStream)
84     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
85 {
86 }
87
88 SpeculativeJIT::~SpeculativeJIT()
89 {
90 }
91
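// Inline-allocates a JSFinalObject with the given structure, together with its butterfly when
// the structure has indexed properties and/or out-of-line capacity. Unused indexed slots are
// pre-filled with the hole (or PNaN for double arrays), and any allocation failure falls back
// to operationNewRawObject via CallArrayAllocatorSlowPathGenerator.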
92 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
93 {
94     IndexingType indexingType = structure->indexingType();
95     bool hasIndexingHeader = hasIndexedProperties(indexingType);
96
97     unsigned inlineCapacity = structure->inlineCapacity();
98     unsigned outOfLineCapacity = structure->outOfLineCapacity();
99     
100     GPRTemporary scratch(this);
101     GPRTemporary scratch2(this);
102     GPRReg scratchGPR = scratch.gpr();
103     GPRReg scratch2GPR = scratch2.gpr();
104
105     ASSERT(vectorLength >= numElements);
106     vectorLength = Butterfly::optimalContiguousVectorLength(structure.get(), vectorLength);
107     
108     JITCompiler::JumpList slowCases;
109
110     size_t size = 0;
111     if (hasIndexingHeader)
112         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
113     size += outOfLineCapacity * sizeof(JSValue);
114
115     m_jit.move(TrustedImmPtr(nullptr), storageGPR);
116
117     if (size) {
118         if (Allocator allocator = m_jit.vm()->jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists)) {
119             m_jit.emitAllocate(storageGPR, JITAllocator::constant(allocator), scratchGPR, scratch2GPR, slowCases);
120             
121             m_jit.addPtr(
122                 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
123                 storageGPR);
124             
125             if (hasIndexingHeader)
126                 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
127         } else
128             slowCases.append(m_jit.jump());
129     }
130
131     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
132     Allocator allocator = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
133     if (allocator) {
134         uint32_t mask = WTF::computeIndexingMask(vectorLength);
135         emitAllocateJSObject(resultGPR, JITAllocator::constant(allocator), scratchGPR, TrustedImmPtr(structure), storageGPR, TrustedImm32(mask), scratch2GPR, slowCases);
136         m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
137     } else
138         slowCases.append(m_jit.jump());
139
140     // I want a slow path that also loads out the storage pointer, and that's
141     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
142     // of work for a very small piece of functionality. :-/
143     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
144         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
145         structure, vectorLength));
146
147     if (numElements < vectorLength) {
148 #if USE(JSVALUE64)
149         if (hasDouble(structure->indexingType()))
150             m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
151         else
152             m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
153         for (unsigned i = numElements; i < vectorLength; ++i)
154             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
155 #else
156         EncodedValueDescriptor value;
157         if (hasDouble(structure->indexingType()))
158             value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
159         else
160             value.asInt64 = JSValue::encode(JSValue());
161         for (unsigned i = numElements; i < vectorLength; ++i) {
162             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
163             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
164         }
165 #endif
166     }
167     
168     if (hasIndexingHeader)
169         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
170     
171     m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());
172     
173     m_jit.mutatorFence(*m_jit.vm());
174 }
175
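// Materializes the argument count for the given (possibly inlined) call frame into lengthGPR.
// For a non-varargs inline frame the count is a compile-time constant; otherwise it is loaded
// from the frame's argument count slot. 'this' is excluded unless includeThis is set.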
176 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
177 {
178     if (inlineCallFrame && !inlineCallFrame->isVarargs())
179         m_jit.move(TrustedImm32(inlineCallFrame->argumentCountIncludingThis - !includeThis), lengthGPR);
180     else {
181         VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
182         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
183         if (!includeThis)
184             m_jit.sub32(TrustedImm32(1), lengthGPR);
185     }
186 }
187
188 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
189 {
190     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
191 }
192
193 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
194 {
195     if (origin.inlineCallFrame) {
196         if (origin.inlineCallFrame->isClosureCall) {
197             m_jit.loadPtr(
198                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
199                 calleeGPR);
200         } else {
201             m_jit.move(
202                 TrustedImmPtr::weakPointer(m_jit.graph(), origin.inlineCallFrame->calleeRecovery.constant().asCell()),
203                 calleeGPR);
204         }
205     } else
206         m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
207 }
208
209 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
210 {
211     m_jit.addPtr(
212         TrustedImm32(
213             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
214         GPRInfo::callFrameRegister, startGPR);
215 }
216
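// When OSR exit fuzzing is enabled, bumps the global fuzz-check counter and returns a jump that
// is taken once the configured "fire at" / "fire at or after" threshold is reached. Callers fold
// the returned jump into the exit's jump list so exits can be forced artificially for testing.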
217 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
218 {
219     if (!Options::useOSRExitFuzz()
220         || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
221         || !doOSRExitFuzzing())
222         return MacroAssembler::Jump();
223     
224     MacroAssembler::Jump result;
225     
226     m_jit.pushToSave(GPRInfo::regT0);
227     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
228     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
229     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
230     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
231     unsigned at = Options::fireOSRExitFuzzAt();
232     if (at || atOrAfter) {
233         unsigned threshold;
234         MacroAssembler::RelationalCondition condition;
235         if (atOrAfter) {
236             threshold = atOrAfter;
237             condition = MacroAssembler::Below;
238         } else {
239             threshold = at;
240             condition = MacroAssembler::NotEqual;
241         }
242         MacroAssembler::Jump ok = m_jit.branch32(
243             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
244         m_jit.popToRestore(GPRInfo::regT0);
245         result = m_jit.jump();
246         ok.link(&m_jit);
247     }
248     m_jit.popToRestore(GPRInfo::regT0);
249     
250     return result;
251 }
252
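// Registers an OSR exit that is taken when jumpToFail fires. The exit records the current
// position in the variable event stream so the exit machinery can reconstruct the bytecode
// state at this point.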
253 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
254 {
255     if (!m_compileOkay)
256         return;
257     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
258     if (fuzzJump.isSet()) {
259         JITCompiler::JumpList jumpsToFail;
260         jumpsToFail.append(fuzzJump);
261         jumpsToFail.append(jumpToFail);
262         m_jit.appendExitInfo(jumpsToFail);
263     } else
264         m_jit.appendExitInfo(jumpToFail);
265     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
266 }
267
268 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
269 {
270     if (!m_compileOkay)
271         return;
272     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
273     if (fuzzJump.isSet()) {
274         JITCompiler::JumpList myJumpsToFail;
275         myJumpsToFail.append(jumpsToFail);
276         myJumpsToFail.append(fuzzJump);
277         m_jit.appendExitInfo(myJumpsToFail);
278     } else
279         m_jit.appendExitInfo(jumpsToFail);
280     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
281 }
282
283 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
284 {
285     if (!m_compileOkay)
286         return OSRExitJumpPlaceholder();
287     unsigned index = m_jit.jitCode()->osrExit.size();
288     m_jit.appendExitInfo();
289     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
290     return OSRExitJumpPlaceholder(index);
291 }
292
293 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
294 {
295     return speculationCheck(kind, jsValueSource, nodeUse.node());
296 }
297
298 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
299 {
300     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
301 }
302
303 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
304 {
305     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
306 }
307
308 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
309 {
310     if (!m_compileOkay)
311         return;
312     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
313     m_jit.appendExitInfo(jumpToFail);
314     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
315 }
316
317 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
318 {
319     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
320 }
321
322 void SpeculativeJIT::emitInvalidationPoint(Node* node)
323 {
324     if (!m_compileOkay)
325         return;
326     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
327     m_jit.jitCode()->appendOSRExit(OSRExit(
328         UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
329         this, m_stream->size()));
330     info.m_replacementSource = m_jit.watchpointLabel();
331     ASSERT(info.m_replacementSource.isSet());
332     noResult(node);
333 }
334
335 void SpeculativeJIT::unreachable(Node* node)
336 {
337     m_compileOkay = false;
338     m_jit.abortWithReason(DFGUnreachableNode, node->op());
339 }
340
341 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
342 {
343     if (!m_compileOkay)
344         return;
345     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
346     m_compileOkay = false;
347     if (verboseCompilationEnabled())
348         dataLog("Bailing compilation.\n");
349 }
350
351 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
352 {
353     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
354 }
355
356 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
357 {
358     ASSERT(needsTypeCheck(edge, typesPassedThrough));
359     m_interpreter.filter(edge, typesPassedThrough);
360     speculationCheck(exitKind, source, edge.node(), jumpToFail);
361 }
362
363 RegisterSet SpeculativeJIT::usedRegisters()
364 {
365     RegisterSet result;
366     
367     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
368         GPRReg gpr = GPRInfo::toRegister(i);
369         if (m_gprs.isInUse(gpr))
370             result.set(gpr);
371     }
372     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
373         FPRReg fpr = FPRInfo::toRegister(i);
374         if (m_fprs.isInUse(fpr))
375             result.set(fpr);
376     }
377     
378     result.merge(RegisterSet::stubUnavailableRegisters());
379     
380     return result;
381 }
382
383 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
384 {
385     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
386 }
387
388 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
389 {
390     m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
391 }
392
393 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
394 {
395     for (auto& slowPathGenerator : m_slowPathGenerators) {
396         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
397         slowPathGenerator->generate(this);
398     }
399     for (auto& slowPathLambda : m_slowPathLambdas) {
400         Node* currentNode = slowPathLambda.currentNode;
401         m_currentNode = currentNode;
402         m_outOfLineStreamIndex = slowPathLambda.streamIndex;
403         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
404         slowPathLambda.generator();
405         m_outOfLineStreamIndex = std::nullopt;
406     }
407 }
408
409 void SpeculativeJIT::clearGenerationInfo()
410 {
411     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
412         m_generationInfo[i] = GenerationInfo();
413     m_gprs = RegisterBank<GPRInfo>();
414     m_fprs = RegisterBank<FPRInfo>();
415 }
416
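// "Silent" spill/fill plans describe how to save a value that lives in a register across a call
// and restore it afterwards without disturbing the recorded register allocation state. The plan
// captures both the store needed (nothing, if the value is already spilled) and the cheapest way
// to refill the register: reload from the stack, rematerialize a constant, re-box an int32, etc.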
417 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
418 {
419     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
420     Node* node = info.node();
421     DataFormat registerFormat = info.registerFormat();
422     ASSERT(registerFormat != DataFormatNone);
423     ASSERT(registerFormat != DataFormatDouble);
424         
425     SilentSpillAction spillAction;
426     SilentFillAction fillAction;
427         
428     if (!info.needsSpill())
429         spillAction = DoNothingForSpill;
430     else {
431 #if USE(JSVALUE64)
432         ASSERT(info.gpr() == source);
433         if (registerFormat == DataFormatInt32)
434             spillAction = Store32Payload;
435         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
436             spillAction = StorePtr;
437         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
438             spillAction = Store64;
439         else {
440             ASSERT(registerFormat & DataFormatJS);
441             spillAction = Store64;
442         }
443 #elif USE(JSVALUE32_64)
444         if (registerFormat & DataFormatJS) {
445             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
446             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
447         } else {
448             ASSERT(info.gpr() == source);
449             spillAction = Store32Payload;
450         }
451 #endif
452     }
453         
454     if (registerFormat == DataFormatInt32) {
455         ASSERT(info.gpr() == source);
456         ASSERT(isJSInt32(info.registerFormat()));
457         if (node->hasConstant()) {
458             ASSERT(node->isInt32Constant());
459             fillAction = SetInt32Constant;
460         } else
461             fillAction = Load32Payload;
462     } else if (registerFormat == DataFormatBoolean) {
463 #if USE(JSVALUE64)
464         RELEASE_ASSERT_NOT_REACHED();
465 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
466         fillAction = DoNothingForFill;
467 #endif
468 #elif USE(JSVALUE32_64)
469         ASSERT(info.gpr() == source);
470         if (node->hasConstant()) {
471             ASSERT(node->isBooleanConstant());
472             fillAction = SetBooleanConstant;
473         } else
474             fillAction = Load32Payload;
475 #endif
476     } else if (registerFormat == DataFormatCell) {
477         ASSERT(info.gpr() == source);
478         if (node->hasConstant()) {
479             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
480             node->asCell(); // To get the assertion.
481             fillAction = SetCellConstant;
482         } else {
483 #if USE(JSVALUE64)
484             fillAction = LoadPtr;
485 #else
486             fillAction = Load32Payload;
487 #endif
488         }
489     } else if (registerFormat == DataFormatStorage) {
490         ASSERT(info.gpr() == source);
491         fillAction = LoadPtr;
492     } else if (registerFormat == DataFormatInt52) {
493         if (node->hasConstant())
494             fillAction = SetInt52Constant;
495         else if (info.spillFormat() == DataFormatInt52)
496             fillAction = Load64;
497         else if (info.spillFormat() == DataFormatStrictInt52)
498             fillAction = Load64ShiftInt52Left;
499         else if (info.spillFormat() == DataFormatNone)
500             fillAction = Load64;
501         else {
502             RELEASE_ASSERT_NOT_REACHED();
503 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
504             fillAction = Load64; // Make GCC happy.
505 #endif
506         }
507     } else if (registerFormat == DataFormatStrictInt52) {
508         if (node->hasConstant())
509             fillAction = SetStrictInt52Constant;
510         else if (info.spillFormat() == DataFormatInt52)
511             fillAction = Load64ShiftInt52Right;
512         else if (info.spillFormat() == DataFormatStrictInt52)
513             fillAction = Load64;
514         else if (info.spillFormat() == DataFormatNone)
515             fillAction = Load64;
516         else {
517             RELEASE_ASSERT_NOT_REACHED();
518 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
519             fillAction = Load64; // Make GCC happy.
520 #endif
521         }
522     } else {
523         ASSERT(registerFormat & DataFormatJS);
524 #if USE(JSVALUE64)
525         ASSERT(info.gpr() == source);
526         if (node->hasConstant()) {
527             if (node->isCellConstant())
528                 fillAction = SetTrustedJSConstant;
529             else
530                 fillAction = SetJSConstant;
531         } else if (info.spillFormat() == DataFormatInt32) {
532             ASSERT(registerFormat == DataFormatJSInt32);
533             fillAction = Load32PayloadBoxInt;
534         } else
535             fillAction = Load64;
536 #else
537         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
538         if (node->hasConstant())
539             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
540         else if (info.payloadGPR() == source)
541             fillAction = Load32Payload;
542         else { // Fill the Tag
543             switch (info.spillFormat()) {
544             case DataFormatInt32:
545                 ASSERT(registerFormat == DataFormatJSInt32);
546                 fillAction = SetInt32Tag;
547                 break;
548             case DataFormatCell:
549                 ASSERT(registerFormat == DataFormatJSCell);
550                 fillAction = SetCellTag;
551                 break;
552             case DataFormatBoolean:
553                 ASSERT(registerFormat == DataFormatJSBoolean);
554                 fillAction = SetBooleanTag;
555                 break;
556             default:
557                 fillAction = Load32Tag;
558                 break;
559             }
560         }
561 #endif
562     }
563         
564     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
565 }
566     
567 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
568 {
569     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
570     Node* node = info.node();
571     ASSERT(info.registerFormat() == DataFormatDouble);
572
573     SilentSpillAction spillAction;
574     SilentFillAction fillAction;
575         
576     if (!info.needsSpill())
577         spillAction = DoNothingForSpill;
578     else {
579         ASSERT(!node->hasConstant());
580         ASSERT(info.spillFormat() == DataFormatNone);
581         ASSERT(info.fpr() == source);
582         spillAction = StoreDouble;
583     }
584         
585 #if USE(JSVALUE64)
586     if (node->hasConstant()) {
587         node->asNumber(); // To get the assertion.
588         fillAction = SetDoubleConstant;
589     } else {
590         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
591         fillAction = LoadDouble;
592     }
593 #elif USE(JSVALUE32_64)
594     ASSERT(info.registerFormat() == DataFormatDouble);
595     if (node->hasConstant()) {
596         node->asNumber(); // To get the assertion.
597         fillAction = SetDoubleConstant;
598     } else
599         fillAction = LoadDouble;
600 #endif
601
602     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
603 }
604     
605 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
606 {
607     switch (plan.spillAction()) {
608     case DoNothingForSpill:
609         break;
610     case Store32Tag:
611         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
612         break;
613     case Store32Payload:
614         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
615         break;
616     case StorePtr:
617         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
618         break;
619 #if USE(JSVALUE64)
620     case Store64:
621         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
622         break;
623 #endif
624     case StoreDouble:
625         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
626         break;
627     default:
628         RELEASE_ASSERT_NOT_REACHED();
629     }
630 }
631     
632 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan)
633 {
634     switch (plan.fillAction()) {
635     case DoNothingForFill:
636         break;
637     case SetInt32Constant:
638         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
639         break;
640 #if USE(JSVALUE64)
641     case SetInt52Constant:
642         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
643         break;
644     case SetStrictInt52Constant:
645         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
646         break;
647 #endif // USE(JSVALUE64)
648     case SetBooleanConstant:
649         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
650         break;
651     case SetCellConstant:
652         ASSERT(plan.node()->constant()->value().isCell());
653         m_jit.move(TrustedImmPtr(plan.node()->constant()), plan.gpr());
654         break;
655 #if USE(JSVALUE64)
656     case SetTrustedJSConstant:
657         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
658         break;
659     case SetJSConstant:
660         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
661         break;
662     case SetDoubleConstant:
663         m_jit.moveDouble(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), plan.fpr());
664         break;
665     case Load32PayloadBoxInt:
666         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
667         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
668         break;
669     case Load32PayloadConvertToInt52:
670         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
671         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
672         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
673         break;
674     case Load32PayloadSignExtend:
675         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
676         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
677         break;
678 #else
679     case SetJSConstantTag:
680         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
681         break;
682     case SetJSConstantPayload:
683         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
684         break;
685     case SetInt32Tag:
686         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
687         break;
688     case SetCellTag:
689         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
690         break;
691     case SetBooleanTag:
692         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
693         break;
694     case SetDoubleConstant:
695         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
696         break;
697 #endif
698     case Load32Tag:
699         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
700         break;
701     case Load32Payload:
702         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
703         break;
704     case LoadPtr:
705         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
706         break;
707 #if USE(JSVALUE64)
708     case Load64:
709         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
710         break;
711     case Load64ShiftInt52Right:
712         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
713         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
714         break;
715     case Load64ShiftInt52Left:
716         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
717         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
718         break;
719 #endif
720     case LoadDouble:
721         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
722         break;
723     default:
724         RELEASE_ASSERT_NOT_REACHED();
725     }
726 }
727
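// On entry tempGPR holds the cell's indexing type byte. Returns the jumps that are taken when
// that indexing type is incompatible with the ArrayMode we speculated on; note that the checks
// clobber tempGPR.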
728 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
729 {
730     JITCompiler::JumpList result;
731     
732     switch (arrayMode.type()) {
733     case Array::Int32:
734     case Array::Double:
735     case Array::Contiguous:
736     case Array::Undecided:
737     case Array::ArrayStorage: {
738         IndexingType shape = arrayMode.shapeMask();
739         switch (arrayMode.arrayClass()) {
740         case Array::OriginalArray:
741             RELEASE_ASSERT_NOT_REACHED();
742             return result;
743
744         case Array::Array:
745             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
746             result.append(m_jit.branch32(
747                 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape)));
748             return result;
749
750         case Array::NonArray:
751         case Array::OriginalNonArray:
752             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
753             result.append(m_jit.branch32(
754                 MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape)));
755             return result;
756
757         case Array::PossiblyArray:
758             m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
759             result.append(m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape)));
760             return result;
761         }
762
763         RELEASE_ASSERT_NOT_REACHED();
764         return result;
765     }
766
767     case Array::SlowPutArrayStorage: {
768         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
769
770         switch (arrayMode.arrayClass()) {
771         case Array::OriginalArray:
772             RELEASE_ASSERT_NOT_REACHED();
773             return result;
774
775         case Array::Array:
776             result.append(
777                 m_jit.branchTest32(
778                     MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
779             break;
780
781         case Array::NonArray:
782         case Array::OriginalNonArray:
783             result.append(
784                 m_jit.branchTest32(
785                     MacroAssembler::NonZero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
786             break;
787
788         case Array::PossiblyArray:
789             break;
790         }
791
792         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
793         m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
794         result.append(
795             m_jit.branch32(
796                 MacroAssembler::Above, tempGPR,
797                 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
798         return result;
799     }
800     default:
801         CRASH();
802         break;
803     }
804     
805     return result;
806 }
807
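// Emits the check for a CheckArray node. If the abstract interpreter already proved the array
// mode, this is a no-op; otherwise we speculate on the indexing type byte (for JSArray-style
// modes) or on the cell's JSType (DirectArguments, ScopedArguments, typed arrays).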
808 void SpeculativeJIT::checkArray(Node* node)
809 {
810     ASSERT(node->arrayMode().isSpecific());
811     ASSERT(!node->arrayMode().doesConversion());
812     
813     SpeculateCellOperand base(this, node->child1());
814     GPRReg baseReg = base.gpr();
815     
816     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
817         noResult(m_currentNode);
818         return;
819     }
820     
821     switch (node->arrayMode().type()) {
822     case Array::AnyTypedArray:
823     case Array::String:
824         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
825         return;
826     case Array::Int32:
827     case Array::Double:
828     case Array::Contiguous:
829     case Array::Undecided:
830     case Array::ArrayStorage:
831     case Array::SlowPutArrayStorage: {
832         GPRTemporary temp(this);
833         GPRReg tempGPR = temp.gpr();
834         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
835         speculationCheck(
836             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
837             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
838         
839         noResult(m_currentNode);
840         return;
841     }
842     case Array::DirectArguments:
843         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
844         noResult(m_currentNode);
845         return;
846     case Array::ScopedArguments:
847         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
848         noResult(m_currentNode);
849         return;
850     default:
851         speculateCellTypeWithoutTypeFiltering(
852             node->child1(), baseReg,
853             typeForTypedArrayType(node->arrayMode().typedArrayType()));
854         noResult(m_currentNode);
855         return;
856     }
857 }
858
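// Arrayify / ArrayifyToStructure: if the object already has the expected structure or indexing
// type we fall through; otherwise the ArrayifySlowPathGenerator converts the object's storage
// on the slow path.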
859 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
860 {
861     ASSERT(node->arrayMode().doesConversion());
862     
863     GPRTemporary temp(this);
864     GPRTemporary structure;
865     GPRReg tempGPR = temp.gpr();
866     GPRReg structureGPR = InvalidGPRReg;
867     
868     if (node->op() != ArrayifyToStructure) {
869         GPRTemporary realStructure(this);
870         structure.adopt(realStructure);
871         structureGPR = structure.gpr();
872     }
873         
874     // We can skip all that comes next if we already have array storage.
875     MacroAssembler::JumpList slowPath;
876     
877     if (node->op() == ArrayifyToStructure) {
878         slowPath.append(m_jit.branchWeakStructure(
879             JITCompiler::NotEqual,
880             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
881             node->structure()));
882     } else {
883         m_jit.load8(
884             MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
885         
886         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
887     }
888     
889     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
890         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
891     
892     noResult(m_currentNode);
893 }
894
895 void SpeculativeJIT::arrayify(Node* node)
896 {
897     ASSERT(node->arrayMode().isSpecific());
898     
899     SpeculateCellOperand base(this, node->child1());
900     
901     if (!node->child2()) {
902         arrayify(node, base.gpr(), InvalidGPRReg);
903         return;
904     }
905     
906     SpeculateInt32Operand property(this, node->child2());
907     
908     arrayify(node, base.gpr(), property.gpr());
909 }
910
911 GPRReg SpeculativeJIT::fillStorage(Edge edge)
912 {
913     VirtualRegister virtualRegister = edge->virtualRegister();
914     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
915     
916     switch (info.registerFormat()) {
917     case DataFormatNone: {
918         if (info.spillFormat() == DataFormatStorage) {
919             GPRReg gpr = allocate();
920             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
921             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
922             info.fillStorage(*m_stream, gpr);
923             return gpr;
924         }
925         
926         // Must be a cell; fill it as a cell and then return the pointer.
927         return fillSpeculateCell(edge);
928     }
929         
930     case DataFormatStorage: {
931         GPRReg gpr = info.gpr();
932         m_gprs.lock(gpr);
933         return gpr;
934     }
935         
936     default:
937         return fillSpeculateCell(edge);
938     }
939 }
940
941 void SpeculativeJIT::useChildren(Node* node)
942 {
943     if (node->flags() & NodeHasVarArgs) {
944         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
945             if (!!m_jit.graph().m_varArgChildren[childIdx])
946                 use(m_jit.graph().m_varArgChildren[childIdx]);
947         }
948     } else {
949         Edge child1 = node->child1();
950         if (!child1) {
951             ASSERT(!node->child2() && !node->child3());
952             return;
953         }
954         use(child1);
955         
956         Edge child2 = node->child2();
957         if (!child2) {
958             ASSERT(!node->child3());
959             return;
960         }
961         use(child2);
962         
963         Edge child3 = node->child3();
964         if (!child3)
965             return;
966         use(child3);
967     }
968 }
969
970 void SpeculativeJIT::compileTryGetById(Node* node)
971 {
972     switch (node->child1().useKind()) {
973     case CellUse: {
974         SpeculateCellOperand base(this, node->child1());
975         JSValueRegsTemporary result(this, Reuse, base);
976
977         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
978         JSValueRegs resultRegs = result.regs();
979
980         base.use();
981
982         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);
983
984         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
985         break;
986     }
987
988     case UntypedUse: {
989         JSValueOperand base(this, node->child1());
990         JSValueRegsTemporary result(this, Reuse, base);
991
992         JSValueRegs baseRegs = base.jsValueRegs();
993         JSValueRegs resultRegs = result.regs();
994
995         base.use();
996
997         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
998
999         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);
1000
1001         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1002         break;
1003     }
1004
1005     default:
1006         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1007         break;
1008     } 
1009 }
1010
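// 'in' with a constant, atomized string key gets an inline-cache fast path: a patchable jump
// wired up through a StructureStubInfo with operationInOptimize on the slow path. Every other
// key goes straight to operationGenericIn.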
1011 void SpeculativeJIT::compileIn(Node* node)
1012 {
1013     SpeculateCellOperand base(this, node->child1());
1014     GPRReg baseGPR = base.gpr();
1015     
1016     if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(*m_jit.vm())) {
1017         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1018             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1019             
1020             GPRTemporary result(this);
1021             GPRReg resultGPR = result.gpr();
1022
1023             use(node->child2());
1024             
1025             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1026             MacroAssembler::Label done = m_jit.label();
1027             
1028             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1029             // we can cast it to const AtomicStringImpl* safely.
1030             auto slowPath = slowPathCall(
1031                 jump.m_jump, this, operationInOptimize,
1032                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1033                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1034             
1035             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1036             stubInfo->codeOrigin = node->origin.semantic;
1037             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1038             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1039             stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
1040 #if USE(JSVALUE32_64)
1041             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1042             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1043             stubInfo->patch.thisTagGPR = static_cast<int8_t>(InvalidGPRReg);
1044 #endif
1045             stubInfo->patch.usedRegisters = usedRegisters();
1046
1047             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1048             addSlowPathGenerator(WTFMove(slowPath));
1049
1050             base.use();
1051
1052             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1053             return;
1054         }
1055     }
1056
1057     JSValueOperand key(this, node->child2());
1058     JSValueRegs regs = key.jsValueRegs();
1059         
1060     GPRFlushedCallResult result(this);
1061     GPRReg resultGPR = result.gpr();
1062         
1063     base.use();
1064     key.use();
1065         
1066     flushRegisters();
1067     callOperation(
1068         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1069         baseGPR, regs);
1070     m_jit.exceptionCheck();
1071     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1072 }
1073
1074 void SpeculativeJIT::compileDeleteById(Node* node)
1075 {
1076     JSValueOperand value(this, node->child1());
1077     GPRFlushedCallResult result(this);
1078
1079     JSValueRegs valueRegs = value.jsValueRegs();
1080     GPRReg resultGPR = result.gpr();
1081
1082     value.use();
1083
1084     flushRegisters();
1085     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1086     m_jit.exceptionCheck();
1087
1088     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1089 }
1090
1091 void SpeculativeJIT::compileDeleteByVal(Node* node)
1092 {
1093     JSValueOperand base(this, node->child1());
1094     JSValueOperand key(this, node->child2());
1095     GPRFlushedCallResult result(this);
1096
1097     JSValueRegs baseRegs = base.jsValueRegs();
1098     JSValueRegs keyRegs = key.jsValueRegs();
1099     GPRReg resultGPR = result.gpr();
1100
1101     base.use();
1102     key.use();
1103
1104     flushRegisters();
1105     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1106     m_jit.exceptionCheck();
1107
1108     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1109 }
1110
1111 void SpeculativeJIT::compilePushWithScope(Node* node)
1112 {
1113     SpeculateCellOperand currentScope(this, node->child1());
1114     GPRReg currentScopeGPR = currentScope.gpr();
1115
1116     GPRFlushedCallResult result(this);
1117     GPRReg resultGPR = result.gpr();
1118
1119     auto objectEdge = node->child2();
1120     if (objectEdge.useKind() == ObjectUse) {
1121         SpeculateCellOperand object(this, objectEdge);
1122         GPRReg objectGPR = object.gpr();
1123         speculateObject(objectEdge, objectGPR);
1124
1125         flushRegisters();
1126         callOperation(operationPushWithScopeObject, resultGPR, currentScopeGPR, objectGPR);
1127         // No exception check here as we did not have to call toObject().
1128     } else {
1129         ASSERT(objectEdge.useKind() == UntypedUse);
1130         JSValueOperand object(this, objectEdge);
1131         JSValueRegs objectRegs = object.jsValueRegs();
1132
1133         flushRegisters();
1134         callOperation(operationPushWithScope, resultGPR, currentScopeGPR, objectRegs);
1135         m_jit.exceptionCheck();
1136     }
1137     
1138     cellResult(resultGPR, node);
1139 }
1140
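// Peephole optimization: if the compare's only use is the Branch that immediately follows it in
// the block, fuse the two into a single compare-and-branch and return true so the caller knows
// the branch has already been emitted.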
1141 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1142 {
1143     unsigned branchIndexInBlock = detectPeepHoleBranch();
1144     if (branchIndexInBlock != UINT_MAX) {
1145         Node* branchNode = m_block->at(branchIndexInBlock);
1146
1147         ASSERT(node->adjustedRefCount() == 1);
1148         
1149         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1150     
1151         m_indexInBlock = branchIndexInBlock;
1152         m_currentNode = branchNode;
1153         
1154         return true;
1155     }
1156     
1157     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1158     
1159     return false;
1160 }
1161
1162 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1163 {
1164     unsigned branchIndexInBlock = detectPeepHoleBranch();
1165     if (branchIndexInBlock != UINT_MAX) {
1166         Node* branchNode = m_block->at(branchIndexInBlock);
1167
1168         ASSERT(node->adjustedRefCount() == 1);
1169         
1170         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1171     
1172         m_indexInBlock = branchIndexInBlock;
1173         m_currentNode = branchNode;
1174         
1175         return true;
1176     }
1177     
1178     nonSpeculativeNonPeepholeStrictEq(node, invert);
1179     
1180     return false;
1181 }
1182
1183 static const char* dataFormatString(DataFormat format)
1184 {
1185     // These values correspond to the DataFormat enum.
1186     const char* strings[] = {
1187         "[  ]",
1188         "[ i]",
1189         "[ d]",
1190         "[ c]",
1191         "Err!",
1192         "Err!",
1193         "Err!",
1194         "Err!",
1195         "[J ]",
1196         "[Ji]",
1197         "[Jd]",
1198         "[Jc]",
1199         "Err!",
1200         "Err!",
1201         "Err!",
1202         "Err!",
1203     };
1204     return strings[format];
1205 }
1206
1207 void SpeculativeJIT::dump(const char* label)
1208 {
1209     if (label)
1210         dataLogF("<%s>\n", label);
1211
1212     dataLogF("  gprs:\n");
1213     m_gprs.dump();
1214     dataLogF("  fprs:\n");
1215     m_fprs.dump();
1216     dataLogF("  VirtualRegisters:\n");
1217     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1218         GenerationInfo& info = m_generationInfo[i];
1219         if (info.alive())
1220             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1221         else
1222             dataLogF("    % 3d:[__][__]", i);
1223         if (info.registerFormat() == DataFormatDouble)
1224             dataLogF(":fpr%d\n", info.fpr());
1225         else if (info.registerFormat() != DataFormatNone
1226 #if USE(JSVALUE32_64)
1227             && !(info.registerFormat() & DataFormatJS)
1228 #endif
1229             ) {
1230             ASSERT(info.gpr() != InvalidGPRReg);
1231             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1232         } else
1233             dataLogF("\n");
1234     }
1235     if (label)
1236         dataLogF("</%s>\n", label);
1237 }
1238
1239 GPRTemporary::GPRTemporary()
1240     : m_jit(0)
1241     , m_gpr(InvalidGPRReg)
1242 {
1243 }
1244
1245 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1246     : m_jit(jit)
1247     , m_gpr(InvalidGPRReg)
1248 {
1249     m_gpr = m_jit->allocate();
1250 }
1251
1252 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1253     : m_jit(jit)
1254     , m_gpr(InvalidGPRReg)
1255 {
1256     m_gpr = m_jit->allocate(specific);
1257 }
1258
1259 #if USE(JSVALUE32_64)
1260 GPRTemporary::GPRTemporary(
1261     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1262     : m_jit(jit)
1263     , m_gpr(InvalidGPRReg)
1264 {
1265     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1266         m_gpr = m_jit->reuse(op1.gpr(which));
1267     else
1268         m_gpr = m_jit->allocate();
1269 }
1270 #endif // USE(JSVALUE32_64)
1271
1272 JSValueRegsTemporary::JSValueRegsTemporary() { }
1273
1274 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1275 #if USE(JSVALUE64)
1276     : m_gpr(jit)
1277 #else
1278     : m_payloadGPR(jit)
1279     , m_tagGPR(jit)
1280 #endif
1281 {
1282 }
1283
1284 #if USE(JSVALUE64)
1285 template<typename T>
1286 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1287     : m_gpr(jit, Reuse, operand)
1288 {
1289 }
1290 #else
1291 template<typename T>
1292 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1293 {
1294     if (resultWord == PayloadWord) {
1295         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1296         m_tagGPR = GPRTemporary(jit);
1297     } else {
1298         m_payloadGPR = GPRTemporary(jit);
1299         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1300     }
1301 }
1302 #endif
1303
1304 #if USE(JSVALUE64)
1305 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1306 {
1307     m_gpr = GPRTemporary(jit, Reuse, operand);
1308 }
1309 #else
1310 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1311 {
1312     if (jit->canReuse(operand.node())) {
1313         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1314         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1315     } else {
1316         m_payloadGPR = GPRTemporary(jit);
1317         m_tagGPR = GPRTemporary(jit);
1318     }
1319 }
1320 #endif
1321
1322 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1323
1324 JSValueRegs JSValueRegsTemporary::regs()
1325 {
1326 #if USE(JSVALUE64)
1327     return JSValueRegs(m_gpr.gpr());
1328 #else
1329     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1330 #endif
1331 }
1332
1333 void GPRTemporary::adopt(GPRTemporary& other)
1334 {
1335     ASSERT(!m_jit);
1336     ASSERT(m_gpr == InvalidGPRReg);
1337     ASSERT(other.m_jit);
1338     ASSERT(other.m_gpr != InvalidGPRReg);
1339     m_jit = other.m_jit;
1340     m_gpr = other.m_gpr;
1341     other.m_jit = 0;
1342     other.m_gpr = InvalidGPRReg;
1343 }
1344
1345 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1346 {
1347     ASSERT(other.m_jit);
1348     ASSERT(other.m_fpr != InvalidFPRReg);
1349     m_jit = other.m_jit;
1350     m_fpr = other.m_fpr;
1351
1352     other.m_jit = nullptr;
1353 }
1354
1355 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1356     : m_jit(jit)
1357     , m_fpr(InvalidFPRReg)
1358 {
1359     m_fpr = m_jit->fprAllocate();
1360 }
1361
1362 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1363     : m_jit(jit)
1364     , m_fpr(InvalidFPRReg)
1365 {
1366     if (m_jit->canReuse(op1.node()))
1367         m_fpr = m_jit->reuse(op1.fpr());
1368     else
1369         m_fpr = m_jit->fprAllocate();
1370 }
1371
1372 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1373     : m_jit(jit)
1374     , m_fpr(InvalidFPRReg)
1375 {
1376     if (m_jit->canReuse(op1.node()))
1377         m_fpr = m_jit->reuse(op1.fpr());
1378     else if (m_jit->canReuse(op2.node()))
1379         m_fpr = m_jit->reuse(op2.fpr());
1380     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1381         m_fpr = m_jit->reuse(op1.fpr());
1382     else
1383         m_fpr = m_jit->fprAllocate();
1384 }
1385
1386 #if USE(JSVALUE32_64)
1387 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1388     : m_jit(jit)
1389     , m_fpr(InvalidFPRReg)
1390 {
1391     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1392         m_fpr = m_jit->reuse(op1.fpr());
1393     else
1394         m_fpr = m_jit->fprAllocate();
1395 }
1396 #endif
1397
1398 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1399 {
1400     BasicBlock* taken = branchNode->branchData()->taken.block;
1401     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1402
1403     if (taken == nextBlock()) {
1404         condition = MacroAssembler::invert(condition);
1405         std::swap(taken, notTaken);
1406     }
1407
1408     SpeculateDoubleOperand op1(this, node->child1());
1409     SpeculateDoubleOperand op2(this, node->child2());
1410     
1411     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1412     jump(notTaken);
1413 }
1414
1415 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1416 {
1417     BasicBlock* taken = branchNode->branchData()->taken.block;
1418     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1419
1420     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1421     
1422     if (taken == nextBlock()) {
1423         condition = MacroAssembler::NotEqual;
1424         BasicBlock* tmp = taken;
1425         taken = notTaken;
1426         notTaken = tmp;
1427     }
1428
1429     SpeculateCellOperand op1(this, node->child1());
1430     SpeculateCellOperand op2(this, node->child2());
1431     
1432     GPRReg op1GPR = op1.gpr();
1433     GPRReg op2GPR = op2.gpr();
1434     
1435     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1436         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1437             speculationCheck(
1438                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1439         }
1440         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1441             speculationCheck(
1442                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1443         }
1444     } else {
1445         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1446             speculationCheck(
1447                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1448                 m_jit.branchIfNotObject(op1GPR));
1449         }
1450         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1451             m_jit.branchTest8(
1452                 MacroAssembler::NonZero, 
1453                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1454                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1455
1456         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1457             speculationCheck(
1458                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1459                 m_jit.branchIfNotObject(op2GPR));
1460         }
1461         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1462             m_jit.branchTest8(
1463                 MacroAssembler::NonZero, 
1464                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1465                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1466     }
1467
1468     branchPtr(condition, op1GPR, op2GPR, taken);
1469     jump(notTaken);
1470 }
1471
1472 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1473 {
1474     BasicBlock* taken = branchNode->branchData()->taken.block;
1475     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1476
1477     // The branch instruction will branch to the taken block.
1478     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1479     if (taken == nextBlock()) {
1480         condition = JITCompiler::invert(condition);
1481         BasicBlock* tmp = taken;
1482         taken = notTaken;
1483         notTaken = tmp;
1484     }
1485
1486     if (node->child1()->isInt32Constant()) {
1487         int32_t imm = node->child1()->asInt32();
1488         SpeculateBooleanOperand op2(this, node->child2());
1489         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1490     } else if (node->child2()->isInt32Constant()) {
1491         SpeculateBooleanOperand op1(this, node->child1());
1492         int32_t imm = node->child2()->asInt32();
1493         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1494     } else {
1495         SpeculateBooleanOperand op1(this, node->child1());
1496         SpeculateBooleanOperand op2(this, node->child2());
1497         branch32(condition, op1.gpr(), op2.gpr(), taken);
1498     }
1499
1500     jump(notTaken);
1501 }
1502
1503 void SpeculativeJIT::compileStringSlice(Node* node)
1504 {
1505     SpeculateCellOperand string(this, node->child1());
1506     GPRTemporary startIndex(this);
1507     GPRTemporary temp(this);
1508     GPRTemporary temp2(this);
1509
1510     GPRReg stringGPR = string.gpr();
1511     GPRReg startIndexGPR = startIndex.gpr();
1512     GPRReg tempGPR = temp.gpr();
1513     GPRReg temp2GPR = temp2.gpr();
1514
1515     speculateString(node->child1(), stringGPR);
1516
1517     {
1518         m_jit.load32(JITCompiler::Address(stringGPR, JSString::offsetOfLength()), temp2GPR);
1519
1520         emitPopulateSliceIndex(node->child2(), temp2GPR, startIndexGPR);
1521         if (node->child3())
1522             emitPopulateSliceIndex(node->child3(), temp2GPR, tempGPR);
1523         else
1524             m_jit.move(temp2GPR, tempGPR);
1525     }
1526
1527     CCallHelpers::JumpList doneCases;
1528     CCallHelpers::JumpList slowCases;
1529
1530     auto nonEmptyCase = m_jit.branch32(MacroAssembler::Below, startIndexGPR, tempGPR);
1531     m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), jsEmptyString(&vm())), tempGPR);
1532     doneCases.append(m_jit.jump());
1533
1534     nonEmptyCase.link(&m_jit);
1535     m_jit.sub32(startIndexGPR, tempGPR); // the size of the sliced string.
1536     slowCases.append(m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(1)));
1537
1538     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), temp2GPR);
1539     slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, temp2GPR));
1540
1541     m_jit.loadPtr(MacroAssembler::Address(temp2GPR, StringImpl::dataOffset()), tempGPR);
1542
1543     // Load the character into tempGPR
1544     m_jit.zeroExtend32ToPtr(startIndexGPR, startIndexGPR);
1545     auto is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(temp2GPR, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1546
1547     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesOne, 0), tempGPR);
1548     auto cont8Bit = m_jit.jump();
1549
1550     is16Bit.link(&m_jit);
1551     m_jit.load16(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesTwo, 0), tempGPR);
1552
1553     auto bigCharacter = m_jit.branch32(MacroAssembler::AboveOrEqual, tempGPR, TrustedImm32(0x100));
1554
1555     // 8 bit string values don't need the isASCII check.
1556     cont8Bit.link(&m_jit);
1557
1558     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), tempGPR);
1559     m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), tempGPR);
1560     m_jit.loadPtr(tempGPR, tempGPR);
1561
1562     addSlowPathGenerator(
1563         slowPathCall(
1564             bigCharacter, this, operationSingleCharacterString, tempGPR, tempGPR));
1565
1566     addSlowPathGenerator(
1567         slowPathCall(
1568             slowCases, this, operationStringSubstr, tempGPR, stringGPR, startIndexGPR, tempGPR));
1569
1570     doneCases.link(&m_jit);
1571     cellResult(tempGPR, node);
1572 }
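
// Roughly, with startIndex/end already clamped to [0, length] by emitPopulateSliceIndex, the
// inline path above computes:
//
//     startIndex >= end                          -> jsEmptyString(vm)
//     end - startIndex == 1 && rope is resolved  -> a single-character string (small-strings table
//                                                   for code units < 0x100, otherwise the
//                                                   operationSingleCharacterString slow path)
//     anything else                              -> operationStringSubstr slow path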
1573
1574 void SpeculativeJIT::compileToLowerCase(Node* node)
1575 {
1576     ASSERT(node->op() == ToLowerCase);
1577     SpeculateCellOperand string(this, node->child1());
1578     GPRTemporary temp(this);
1579     GPRTemporary index(this);
1580     GPRTemporary charReg(this);
1581     GPRTemporary length(this);
1582
1583     GPRReg stringGPR = string.gpr();
1584     GPRReg tempGPR = temp.gpr();
1585     GPRReg indexGPR = index.gpr();
1586     GPRReg charGPR = charReg.gpr();
1587     GPRReg lengthGPR = length.gpr();
1588
1589     speculateString(node->child1(), stringGPR);
1590
1591     CCallHelpers::JumpList slowPath;
1592
1593     m_jit.move(TrustedImmPtr(nullptr), indexGPR);
1594
1595     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1596     slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
1597
1598     slowPath.append(m_jit.branchTest32(
1599         MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1600         MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1601     m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1602     m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1603
1604     auto loopStart = m_jit.label();
1605     auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1606     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1607     slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1608     m_jit.sub32(TrustedImm32('A'), charGPR);
1609     slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1610
1611     m_jit.add32(TrustedImm32(1), indexGPR);
1612     m_jit.jump().linkTo(loopStart, &m_jit);
1613     
1614     slowPath.link(&m_jit);
1615     silentSpillAllRegisters(lengthGPR);
1616     callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1617     silentFillAllRegisters();
1618     m_jit.exceptionCheck();
1619     auto done = m_jit.jump();
1620
1621     loopDone.link(&m_jit);
1622     m_jit.move(stringGPR, lengthGPR);
1623
1624     done.link(&m_jit);
1625     cellResult(lengthGPR, node);
1626 }
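
// The inline loop only handles the common case of an 8-bit, ASCII, already-lowercase string, in
// which case the original string is returned unchanged. A scalar sketch of that fast path
// (chars/length stand for the already-loaded StringImpl data; this is illustrative, not JSC API):
//
//     unsigned index = 0;
//     for (; index < length; ++index) {
//         LChar c = chars[index];
//         if (c > 0x7f || (c >= 'A' && c <= 'Z'))
//             return operationToLowerCase(exec, string, index); // slow path resumes at `index`
//     }
//     return string;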
1627
1628 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1629 {
1630     BasicBlock* taken = branchNode->branchData()->taken.block;
1631     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1632
1633     // The branch instruction will branch to the taken block.
1634     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1635     if (taken == nextBlock()) {
1636         condition = JITCompiler::invert(condition);
1637         BasicBlock* tmp = taken;
1638         taken = notTaken;
1639         notTaken = tmp;
1640     }
1641
1642     if (node->child1()->isInt32Constant()) {
1643         int32_t imm = node->child1()->asInt32();
1644         SpeculateInt32Operand op2(this, node->child2());
1645         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1646     } else if (node->child2()->isInt32Constant()) {
1647         SpeculateInt32Operand op1(this, node->child1());
1648         int32_t imm = node->child2()->asInt32();
1649         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1650     } else {
1651         SpeculateInt32Operand op1(this, node->child1());
1652         SpeculateInt32Operand op2(this, node->child2());
1653         branch32(condition, op1.gpr(), op2.gpr(), taken);
1654     }
1655
1656     jump(notTaken);
1657 }
1658
1659 // Returns true if the compare is fused with a subsequent branch.
1660 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1661 {
1662     // Fused compare & branch.
1663     unsigned branchIndexInBlock = detectPeepHoleBranch();
1664     if (branchIndexInBlock != UINT_MAX) {
1665         Node* branchNode = m_block->at(branchIndexInBlock);
1666
1667         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1668         // so there can be no intervening nodes that also reference the compare.
1669         ASSERT(node->adjustedRefCount() == 1);
1670
1671         if (node->isBinaryUseKind(Int32Use))
1672             compilePeepHoleInt32Branch(node, branchNode, condition);
1673 #if USE(JSVALUE64)
1674         else if (node->isBinaryUseKind(Int52RepUse))
1675             compilePeepHoleInt52Branch(node, branchNode, condition);
1676 #endif // USE(JSVALUE64)
1677         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1678             // Use non-peephole comparison, for now.
1679             return false;
1680         } else if (node->isBinaryUseKind(DoubleRepUse))
1681             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1682         else if (node->op() == CompareEq) {
1683             if (node->isBinaryUseKind(BooleanUse))
1684                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1685             else if (node->isBinaryUseKind(SymbolUse))
1686                 compilePeepHoleSymbolEquality(node, branchNode);
1687             else if (node->isBinaryUseKind(ObjectUse))
1688                 compilePeepHoleObjectEquality(node, branchNode);
1689             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1690                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1691             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1692                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1693             else if (!needsTypeCheck(node->child1(), SpecOther))
1694                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1695             else if (!needsTypeCheck(node->child2(), SpecOther))
1696                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1697             else {
1698                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1699                 return true;
1700             }
1701         } else {
1702             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1703             return true;
1704         }
1705
1706         use(node->child1());
1707         use(node->child2());
1708         m_indexInBlock = branchIndexInBlock;
1709         m_currentNode = branchNode;
1710         return true;
1711     }
1712     return false;
1713 }
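
// The peephole turns a compare whose only use is the immediately following Branch into one fused
// compare-and-branch. For example, for int32 operands the two nodes collapse to roughly:
//
//     branch32(condition, op1GPR, op2GPR, taken);
//     jump(notTaken);
//
// instead of materializing a boolean and then branching on it. Advancing m_indexInBlock and
// m_currentNode past the Branch makes the main compilation loop skip it, since its work has
// already been done here.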
1714
1715 void SpeculativeJIT::noticeOSRBirth(Node* node)
1716 {
1717     if (!node->hasVirtualRegister())
1718         return;
1719     
1720     VirtualRegister virtualRegister = node->virtualRegister();
1721     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1722     
1723     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1724 }
1725
1726 void SpeculativeJIT::compileMovHint(Node* node)
1727 {
1728     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1729     
1730     Node* child = node->child1().node();
1731     noticeOSRBirth(child);
1732     
1733     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1734 }
1735
1736 void SpeculativeJIT::bail(AbortReason reason)
1737 {
1738     if (verboseCompilationEnabled())
1739         dataLog("Bailing compilation.\n");
1740     m_compileOkay = true;
1741     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1742     clearGenerationInfo();
1743 }
1744
1745 void SpeculativeJIT::compileCurrentBlock()
1746 {
1747     ASSERT(m_compileOkay);
1748     
1749     if (!m_block)
1750         return;
1751     
1752     ASSERT(m_block->isReachable);
1753     
1754     m_jit.blockHeads()[m_block->index] = m_jit.label();
1755
1756     if (!m_block->intersectionOfCFAHasVisited) {
1757         // Don't generate code for basic blocks that are unreachable according to CFA.
1758         // But to be sure that nobody has generated a jump to this block, drop in a
1759         // breakpoint here.
1760         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1761         return;
1762     }
1763
1764     if (m_block->isCatchEntrypoint) {
1765         m_jit.addPtr(CCallHelpers::TrustedImm32(-(m_jit.graph().frameRegisterCount() * sizeof(Register))), GPRInfo::callFrameRegister,  CCallHelpers::stackPointerRegister);
1766         if (Options::zeroStackFrame())
1767             m_jit.clearStackFrame(GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister, GPRInfo::regT0, m_jit.graph().frameRegisterCount() * sizeof(Register));
1768         m_jit.emitSaveCalleeSaves();
1769         m_jit.emitMaterializeTagCheckRegisters();
1770         m_jit.emitPutToCallFrameHeader(m_jit.codeBlock(), CallFrameSlot::codeBlock);
1771     }
1772
1773     m_stream->appendAndLog(VariableEvent::reset());
1774     
1775     m_jit.jitAssertHasValidCallFrame();
1776     m_jit.jitAssertTagsInPlace();
1777     m_jit.jitAssertArgumentCountSane();
1778
1779     m_state.reset();
1780     m_state.beginBasicBlock(m_block);
1781     
1782     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1783         int operand = m_block->variablesAtHead.operandForIndex(i);
1784         Node* node = m_block->variablesAtHead[i];
1785         if (!node)
1786             continue; // No need to record dead SetLocals.
1787         
1788         VariableAccessData* variable = node->variableAccessData();
1789         DataFormat format;
1790         if (!node->refCount())
1791             continue; // No need to record dead SetLocals.
1792         format = dataFormatFor(variable->flushFormat());
1793         m_stream->appendAndLog(
1794             VariableEvent::setLocal(
1795                 VirtualRegister(operand),
1796                 variable->machineLocal(),
1797                 format));
1798     }
1799
1800     m_origin = NodeOrigin();
1801     
1802     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1803         m_currentNode = m_block->at(m_indexInBlock);
1804         
1805         // We may have hit a contradiction that the CFA was aware of but that the JIT
1806         // didn't cause directly.
1807         if (!m_state.isValid()) {
1808             bail(DFGBailedAtTopOfBlock);
1809             return;
1810         }
1811
1812         m_interpreter.startExecuting();
1813         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1814         m_jit.setForNode(m_currentNode);
1815         m_origin = m_currentNode->origin;
1816         if (validationEnabled())
1817             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1818         m_lastGeneratedNode = m_currentNode->op();
1819         
1820         ASSERT(m_currentNode->shouldGenerate());
1821         
1822         if (verboseCompilationEnabled()) {
1823             dataLogF(
1824                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1825                 (int)m_currentNode->index(),
1826                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1827             dataLog("\n");
1828         }
1829
1830         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1831             m_jit.jitReleaseAssertNoException(*m_jit.vm());
1832
1833         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1834
1835         compile(m_currentNode);
1836         
1837         if (belongsInMinifiedGraph(m_currentNode->op()))
1838             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1839         
1840 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1841         m_jit.clearRegisterAllocationOffsets();
1842 #endif
1843         
1844         if (!m_compileOkay) {
1845             bail(DFGBailedAtEndOfNode);
1846             return;
1847         }
1848         
1849         // Make sure that the abstract state is rematerialized for the next node.
1850         m_interpreter.executeEffects(m_indexInBlock);
1851     }
1852     
1853     // Perform the most basic verification that children have been used correctly.
1854     if (!ASSERT_DISABLED) {
1855         for (auto& info : m_generationInfo)
1856             RELEASE_ASSERT(!info.alive());
1857     }
1858 }
1859
1860 // If we are making type predictions about our arguments then
1861 // we need to check that they are correct on function entry.
1862 void SpeculativeJIT::checkArgumentTypes()
1863 {
1864     ASSERT(!m_currentNode);
1865     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1866
1867     auto& arguments = m_jit.graph().m_rootToArguments.find(m_jit.graph().block(0))->value;
1868     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1869         Node* node = arguments[i];
1870         if (!node) {
1871             // The argument is dead. We don't do any checks for such arguments.
1872             continue;
1873         }
1874         
1875         ASSERT(node->op() == SetArgument);
1876         ASSERT(node->shouldGenerate());
1877
1878         VariableAccessData* variableAccessData = node->variableAccessData();
1879         FlushFormat format = variableAccessData->flushFormat();
1880         
1881         if (format == FlushedJSValue)
1882             continue;
1883         
1884         VirtualRegister virtualRegister = variableAccessData->local();
1885
1886         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1887         
1888 #if USE(JSVALUE64)
1889         switch (format) {
1890         case FlushedInt32: {
1891             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1892             break;
1893         }
1894         case FlushedBoolean: {
1895             GPRTemporary temp(this);
1896             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1897             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1898             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1899             break;
1900         }
1901         case FlushedCell: {
1902             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1903             break;
1904         }
1905         default:
1906             RELEASE_ASSERT_NOT_REACHED();
1907             break;
1908         }
1909 #else
1910         switch (format) {
1911         case FlushedInt32: {
1912             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1913             break;
1914         }
1915         case FlushedBoolean: {
1916             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1917             break;
1918         }
1919         case FlushedCell: {
1920             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1921             break;
1922         }
1923         default:
1924             RELEASE_ASSERT_NOT_REACHED();
1925             break;
1926         }
1927 #endif
1928     }
1929
1930     m_origin = NodeOrigin();
1931 }
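
// On 64-bit these entry checks lean on the NaN-boxing encoding (the exact constants live in
// JSCJSValue.h; the predicates below are only a sketch of what the emitted branches test):
//
//     FlushedInt32:   value >= TagTypeNumber            (all int32s carry the full number tag)
//     FlushedCell:    (value & TagMask) == 0            (cells have no tag bits set)
//     FlushedBoolean: ((value ^ ValueFalse) & ~1) == 0  (only the low bit distinguishes true/false)
//
// On 32-bit the tag word is simply compared against Int32Tag / BooleanTag / CellTag.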
1932
1933 bool SpeculativeJIT::compile()
1934 {
1935     checkArgumentTypes();
1936     
1937     ASSERT(!m_currentNode);
1938     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1939         m_jit.setForBlockIndex(blockIndex);
1940         m_block = m_jit.graph().block(blockIndex);
1941         compileCurrentBlock();
1942     }
1943     linkBranches();
1944     return true;
1945 }
1946
1947 void SpeculativeJIT::createOSREntries()
1948 {
1949     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1950         BasicBlock* block = m_jit.graph().block(blockIndex);
1951         if (!block)
1952             continue;
1953         if (block->isOSRTarget || block->isCatchEntrypoint) {
1954             // Currently we don't have OSR entry trampolines. We could add them
1955             // here if need be.
1956             m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1957         }
1958     }
1959 }
1960
1961 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1962 {
1963     unsigned osrEntryIndex = 0;
1964     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1965         BasicBlock* block = m_jit.graph().block(blockIndex);
1966         if (!block)
1967             continue;
1968         if (!block->isOSRTarget && !block->isCatchEntrypoint)
1969             continue;
1970         if (block->isCatchEntrypoint) {
1971             auto& argumentsVector = m_jit.graph().m_rootToArguments.find(block)->value;
1972             Vector<FlushFormat> argumentFormats;
1973             argumentFormats.reserveInitialCapacity(argumentsVector.size());
1974             for (Node* setArgument : argumentsVector) {
1975                 if (setArgument) {
1976                     FlushFormat flushFormat = setArgument->variableAccessData()->flushFormat();
1977                     ASSERT(flushFormat == FlushedInt32 || flushFormat == FlushedCell || flushFormat == FlushedBoolean || flushFormat == FlushedJSValue);
1978                     argumentFormats.uncheckedAppend(flushFormat);
1979                 } else
1980                     argumentFormats.uncheckedAppend(DeadFlush);
1981             }
1982             m_jit.noticeCatchEntrypoint(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer, WTFMove(argumentFormats));
1983         } else {
1984             ASSERT(block->isOSRTarget);
1985             m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1986         }
1987     }
1988
1989     m_jit.jitCode()->finalizeOSREntrypoints();
1990     m_jit.jitCode()->common.finalizeCatchEntrypoints();
1991
1992     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1993     
1994     if (verboseCompilationEnabled()) {
1995         DumpContext dumpContext;
1996         dataLog("OSR Entries:\n");
1997         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1998             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1999         if (!dumpContext.isEmpty())
2000             dumpContext.dump(WTF::dataFile());
2001     }
2002 }
2003     
2004 void SpeculativeJIT::compileCheckTraps(Node*)
2005 {
2006     ASSERT(Options::usePollingTraps());
2007     GPRTemporary unused(this);
2008     GPRReg unusedGPR = unused.gpr();
2009
2010     JITCompiler::Jump needTrapHandling = m_jit.branchTest8(JITCompiler::NonZero,
2011         JITCompiler::AbsoluteAddress(m_jit.vm()->needTrapHandlingAddress()));
2012
2013     addSlowPathGenerator(slowPathCall(needTrapHandling, this, operationHandleTraps, unusedGPR));
2014 }
2015
2016 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
2017 {
2018     Edge child3 = m_jit.graph().varArgChild(node, 2);
2019     Edge child4 = m_jit.graph().varArgChild(node, 3);
2020
2021     ArrayMode arrayMode = node->arrayMode();
2022     
2023     GPRReg baseReg = base.gpr();
2024     GPRReg propertyReg = property.gpr();
2025     
2026     SpeculateDoubleOperand value(this, child3);
2027
2028     FPRReg valueReg = value.fpr();
2029     
2030     DFG_TYPE_CHECK(
2031         JSValueRegs(), child3, SpecFullRealNumber,
2032         m_jit.branchDouble(
2033             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
2034     
2035     if (!m_compileOkay)
2036         return;
2037     
2038     StorageOperand storage(this, child4);
2039     GPRReg storageReg = storage.gpr();
2040
2041     if (node->op() == PutByValAlias) {
2042         // Store the value to the array.
2043         GPRReg propertyReg = property.gpr();
2044         FPRReg valueReg = value.fpr();
2045         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2046         
2047         noResult(m_currentNode);
2048         return;
2049     }
2050     
2051     GPRTemporary temporary;
2052     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
2053
2054     MacroAssembler::Jump slowCase;
2055     
2056     if (arrayMode.isInBounds()) {
2057         speculationCheck(
2058             OutOfBounds, JSValueRegs(), 0,
2059             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2060     } else {
2061         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2062         
2063         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
2064         
2065         if (!arrayMode.isOutOfBounds())
2066             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
2067         
2068         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
2069         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2070
2071         inBounds.link(&m_jit);
2072     }
2073     
2074     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2075
2076     base.use();
2077     property.use();
2078     value.use();
2079     storage.use();
2080     
2081     if (arrayMode.isOutOfBounds()) {
2082         addSlowPathGenerator(
2083             slowPathCall(
2084                 slowCase, this,
2085                 m_jit.codeBlock()->isStrictMode()
2086                     ? (node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsStrict)
2087                     : (node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsNonStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict),
2088                 NoResult, baseReg, propertyReg, valueReg));
2089     }
2090
2091     noResult(m_currentNode, UseChildrenCalledExplicitly);
2092 }
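
// Ignoring the PutByValAlias case, the store above is roughly:
//
//     if (arrayMode.isInBounds())           // index must already be < publicLength,
//         storage[index] = value;           // otherwise it is a speculation failure
//     else if (index < vectorLength) {
//         storage[index] = value;
//         if (index >= publicLength)
//             publicLength = index + 1;
//     } else
//         operationPutDoubleByVal*BeyondArrayBounds (or a speculation failure when the mode
//         does not tolerate out-of-bounds stores)
//
// The value itself is speculated to be a non-NaN number up front (the DoubleNotEqualOrUnordered
// self-compare against valueReg), so only full real numbers reach double array storage.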
2093
2094 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
2095 {
2096     SpeculateCellOperand string(this, node->child1());
2097     SpeculateStrictInt32Operand index(this, node->child2());
2098     StorageOperand storage(this, node->child3());
2099
2100     GPRReg stringReg = string.gpr();
2101     GPRReg indexReg = index.gpr();
2102     GPRReg storageReg = storage.gpr();
2103     
2104     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
2105
2106     // unsigned comparison so we can filter out negative indices and indices that are too large
2107     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2108
2109     GPRTemporary scratch(this);
2110     GPRReg scratchReg = scratch.gpr();
2111
2112     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2113
2114     // Load the character into scratchReg
2115     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2116
2117     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2118     JITCompiler::Jump cont8Bit = m_jit.jump();
2119
2120     is16Bit.link(&m_jit);
2121
2122     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2123
2124     cont8Bit.link(&m_jit);
2125
2126     int32Result(scratchReg, m_currentNode);
2127 }
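
// A scalar sketch of what the 8-bit/16-bit split above loads (the bounds check has already
// passed, and `impl` stands for the string's StringImpl):
//
//     unsigned code = impl->is8Bit() ? impl->characters8()[index] : impl->characters16()[index];
//
// which is String.prototype.charCodeAt for an in-bounds index.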
2128
2129 void SpeculativeJIT::compileGetByValOnString(Node* node)
2130 {
2131     SpeculateCellOperand base(this, m_graph.child(node, 0));
2132     SpeculateStrictInt32Operand property(this, m_graph.child(node, 1));
2133     StorageOperand storage(this, m_graph.child(node, 2));
2134     GPRReg baseReg = base.gpr();
2135     GPRReg propertyReg = property.gpr();
2136     GPRReg storageReg = storage.gpr();
2137
2138     GPRTemporary scratch(this);
2139     GPRReg scratchReg = scratch.gpr();
2140 #if USE(JSVALUE32_64)
2141     GPRTemporary resultTag;
2142     GPRReg resultTagReg = InvalidGPRReg;
2143     if (node->arrayMode().isOutOfBounds()) {
2144         GPRTemporary realResultTag(this);
2145         resultTag.adopt(realResultTag);
2146         resultTagReg = resultTag.gpr();
2147     }
2148 #endif
2149
2150     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(m_graph.child(node, 0))));
2151
2152     // unsigned comparison so we can filter out negative indices and indices that are too large
2153     JITCompiler::Jump outOfBounds = m_jit.branch32(
2154         MacroAssembler::AboveOrEqual, propertyReg,
2155         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2156     if (node->arrayMode().isInBounds())
2157         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2158
2159     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2160
2161     // Load the character into scratchReg
2162     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2163
2164     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2165     JITCompiler::Jump cont8Bit = m_jit.jump();
2166
2167     is16Bit.link(&m_jit);
2168
2169     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2170
2171     JITCompiler::Jump bigCharacter =
2172         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2173
2174     // 8 bit string values don't need the isASCII check.
2175     cont8Bit.link(&m_jit);
2176
2177     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2178     m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2179     m_jit.loadPtr(scratchReg, scratchReg);
2180
2181     addSlowPathGenerator(
2182         slowPathCall(
2183             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2184
2185     if (node->arrayMode().isOutOfBounds()) {
2186 #if USE(JSVALUE32_64)
2187         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2188 #endif
2189
2190         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2191         bool prototypeChainIsSane = false;
2192         if (globalObject->stringPrototypeChainIsSane()) {
2193             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2194             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2195             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2196             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2197             // indexed properties either.
2198             // https://bugs.webkit.org/show_bug.cgi?id=144668
2199             m_jit.graph().registerAndWatchStructureTransition(globalObject->stringPrototype()->structure());
2200             m_jit.graph().registerAndWatchStructureTransition(globalObject->objectPrototype()->structure());
2201             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2202         }
2203         if (prototypeChainIsSane) {
2204 #if USE(JSVALUE64)
2205             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2206                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2207 #else
2208             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2209                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2210                 baseReg, propertyReg));
2211 #endif
2212         } else {
2213 #if USE(JSVALUE64)
2214             addSlowPathGenerator(
2215                 slowPathCall(
2216                     outOfBounds, this, operationGetByValStringInt,
2217                     scratchReg, baseReg, propertyReg));
2218 #else
2219             addSlowPathGenerator(
2220                 slowPathCall(
2221                     outOfBounds, this, operationGetByValStringInt,
2222                     JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2223 #endif
2224         }
2225         
2226 #if USE(JSVALUE64)
2227         jsValueResult(scratchReg, m_currentNode);
2228 #else
2229         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2230 #endif
2231     } else
2232         cellResult(scratchReg, m_currentNode);
2233 }
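
// Behaviourally, for example:
//
//     "foo"[1]  -> "o"         in-bounds: a single-character string from the small-strings table
//                              (or operationSingleCharacterString for code units >= 0x100)
//     "foo"[9]  -> undefined   out-of-bounds: if String.prototype / Object.prototype are watched
//                              as having no indexed properties (the "sane chain" case), the slow
//                              path can return undefined directly; otherwise it falls back to a
//                              generic operationGetByValStringInt lookup.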
2234
2235 void SpeculativeJIT::compileFromCharCode(Node* node)
2236 {
2237     Edge& child = node->child1();
2238     if (child.useKind() == UntypedUse) {
2239         JSValueOperand opr(this, child);
2240         JSValueRegs oprRegs = opr.jsValueRegs();
2241
2242         flushRegisters();
2243         JSValueRegsFlushedCallResult result(this);
2244         JSValueRegs resultRegs = result.regs();
2245         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2246         m_jit.exceptionCheck();
2247         
2248         jsValueResult(resultRegs, node);
2249         return;
2250     }
2251
2252     SpeculateStrictInt32Operand property(this, child);
2253     GPRReg propertyReg = property.gpr();
2254     GPRTemporary smallStrings(this);
2255     GPRTemporary scratch(this);
2256     GPRReg scratchReg = scratch.gpr();
2257     GPRReg smallStringsReg = smallStrings.gpr();
2258
2259     JITCompiler::JumpList slowCases;
2260     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2261     m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2262     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2263
2264     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2265     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2266     cellResult(scratchReg, m_currentNode);
2267 }
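
// String.fromCharCode(code) fast path: for code < 0xff the result is read straight out of
// vm.smallStrings.singleCharacterStrings(); a larger code unit, or a cache slot that has not
// been materialized yet, takes the operationStringFromCharCode slow path. UntypedUse operands
// skip the fast path entirely and call operationStringFromCharCodeUntyped.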
2268
2269 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2270 {
2271     VirtualRegister virtualRegister = node->virtualRegister();
2272     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2273
2274     switch (info.registerFormat()) {
2275     case DataFormatStorage:
2276         RELEASE_ASSERT_NOT_REACHED();
2277
2278     case DataFormatBoolean:
2279     case DataFormatCell:
2280         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2281         return GeneratedOperandTypeUnknown;
2282
2283     case DataFormatNone:
2284     case DataFormatJSCell:
2285     case DataFormatJS:
2286     case DataFormatJSBoolean:
2287     case DataFormatJSDouble:
2288         return GeneratedOperandJSValue;
2289
2290     case DataFormatJSInt32:
2291     case DataFormatInt32:
2292         return GeneratedOperandInteger;
2293
2294     default:
2295         RELEASE_ASSERT_NOT_REACHED();
2296         return GeneratedOperandTypeUnknown;
2297     }
2298 }
2299
2300 void SpeculativeJIT::compileValueToInt32(Node* node)
2301 {
2302     switch (node->child1().useKind()) {
2303 #if USE(JSVALUE64)
2304     case Int52RepUse: {
2305         SpeculateStrictInt52Operand op1(this, node->child1());
2306         GPRTemporary result(this, Reuse, op1);
2307         GPRReg op1GPR = op1.gpr();
2308         GPRReg resultGPR = result.gpr();
2309         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2310         int32Result(resultGPR, node, DataFormatInt32);
2311         return;
2312     }
2313 #endif // USE(JSVALUE64)
2314         
2315     case DoubleRepUse: {
2316         GPRTemporary result(this);
2317         SpeculateDoubleOperand op1(this, node->child1());
2318         FPRReg fpr = op1.fpr();
2319         GPRReg gpr = result.gpr();
2320         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2321         
2322         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this,
2323             hasSensibleDoubleToInt() ? operationToInt32SensibleSlow : operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2324         
2325         int32Result(gpr, node);
2326         return;
2327     }
2328     
2329     case NumberUse:
2330     case NotCellUse: {
2331         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2332         case GeneratedOperandInteger: {
2333             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2334             GPRTemporary result(this, Reuse, op1);
2335             m_jit.move(op1.gpr(), result.gpr());
2336             int32Result(result.gpr(), node, op1.format());
2337             return;
2338         }
2339         case GeneratedOperandJSValue: {
2340             GPRTemporary result(this);
2341 #if USE(JSVALUE64)
2342             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2343
2344             GPRReg gpr = op1.gpr();
2345             GPRReg resultGpr = result.gpr();
2346             FPRTemporary tempFpr(this);
2347             FPRReg fpr = tempFpr.fpr();
2348
2349             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2350             JITCompiler::JumpList converted;
2351
2352             if (node->child1().useKind() == NumberUse) {
2353                 DFG_TYPE_CHECK(
2354                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2355                     m_jit.branchTest64(
2356                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2357             } else {
2358                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2359                 
2360                 DFG_TYPE_CHECK(
2361                     JSValueRegs(gpr), node->child1(), ~SpecCellCheck, m_jit.branchIfCell(JSValueRegs(gpr)));
2362                 
2363                 // It's not a cell: so true turns into 1 and all else turns into 0.
2364                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2365                 converted.append(m_jit.jump());
2366                 
2367                 isNumber.link(&m_jit);
2368             }
2369
2370             // First, if we get here we have a double encoded as a JSValue
2371             unboxDouble(gpr, resultGpr, fpr);
2372
2373             silentSpillAllRegisters(resultGpr);
2374             callOperation(operationToInt32, resultGpr, fpr);
2375             silentFillAllRegisters();
2376
2377             converted.append(m_jit.jump());
2378
2379             isInteger.link(&m_jit);
2380             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2381
2382             converted.link(&m_jit);
2383 #else
2384             Node* childNode = node->child1().node();
2385             VirtualRegister virtualRegister = childNode->virtualRegister();
2386             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2387
2388             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2389
2390             GPRReg payloadGPR = op1.payloadGPR();
2391             GPRReg resultGpr = result.gpr();
2392         
2393             JITCompiler::JumpList converted;
2394
2395             if (info.registerFormat() == DataFormatJSInt32)
2396                 m_jit.move(payloadGPR, resultGpr);
2397             else {
2398                 GPRReg tagGPR = op1.tagGPR();
2399                 FPRTemporary tempFpr(this);
2400                 FPRReg fpr = tempFpr.fpr();
2401                 FPRTemporary scratch(this);
2402
2403                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2404
2405                 if (node->child1().useKind() == NumberUse) {
2406                     DFG_TYPE_CHECK(
2407                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2408                         m_jit.branch32(
2409                             MacroAssembler::AboveOrEqual, tagGPR,
2410                             TrustedImm32(JSValue::LowestTag)));
2411                 } else {
2412                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2413                     
2414                     DFG_TYPE_CHECK(
2415                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2416                         m_jit.branchIfCell(op1.jsValueRegs()));
2417                     
2418                     // It's not a cell: so true turns into 1 and all else turns into 0.
2419                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2420                     m_jit.move(TrustedImm32(0), resultGpr);
2421                     converted.append(m_jit.jump());
2422                     
2423                     isBoolean.link(&m_jit);
2424                     m_jit.move(payloadGPR, resultGpr);
2425                     converted.append(m_jit.jump());
2426                     
2427                     isNumber.link(&m_jit);
2428                 }
2429
2430                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2431
2432                 silentSpillAllRegisters(resultGpr);
2433                 callOperation(operationToInt32, resultGpr, fpr);
2434                 silentFillAllRegisters();
2435
2436                 converted.append(m_jit.jump());
2437
2438                 isInteger.link(&m_jit);
2439                 m_jit.move(payloadGPR, resultGpr);
2440
2441                 converted.link(&m_jit);
2442             }
2443 #endif
2444             int32Result(resultGpr, node);
2445             return;
2446         }
2447         case GeneratedOperandTypeUnknown:
2448             RELEASE_ASSERT(!m_compileOkay);
2449             return;
2450         }
2451         RELEASE_ASSERT_NOT_REACHED();
2452         return;
2453     }
2454     
2455     default:
2456         ASSERT(!m_compileOkay);
2457         return;
2458     }
2459 }
2460
2461 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2462 {
2463     if (doesOverflow(node->arithMode())) {
2464         if (enableInt52()) {
2465             SpeculateInt32Operand op1(this, node->child1());
2466             GPRTemporary result(this, Reuse, op1);
2467             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2468             strictInt52Result(result.gpr(), node);
2469             return;
2470         }
2471         SpeculateInt32Operand op1(this, node->child1());
2472         FPRTemporary result(this);
2473             
2474         GPRReg inputGPR = op1.gpr();
2475         FPRReg outputFPR = result.fpr();
2476             
2477         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2478             
2479         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2480         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2481         positive.link(&m_jit);
2482             
2483         doubleResult(outputFPR, node);
2484         return;
2485     }
2486     
2487     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2488
2489     SpeculateInt32Operand op1(this, node->child1());
2490     GPRTemporary result(this);
2491
2492     m_jit.move(op1.gpr(), result.gpr());
2493
2494     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2495
2496     int32Result(result.gpr(), node, op1.format());
2497 }
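
// The input is a value that was produced as a uint32 but lives in an int32 register. When
// overflow is allowed, the conversion is, in effect (a sketch, not JSC code):
//
//     double uint32ToNumber(int32_t bits)
//     {
//         double d = bits;
//         if (bits < 0)
//             d += 4294967296.0; // 2^32: the register really held bits + 2^32
//         return d;
//     }
//
// (or a zero-extension to Int52 when that is available). With Arith::CheckOverflow the value is
// instead speculated to be non-negative, so the int32 itself is already the correct result.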
2498
2499 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2500 {
2501     SpeculateDoubleOperand op1(this, node->child1());
2502     FPRTemporary scratch(this);
2503     GPRTemporary result(this);
2504     
2505     FPRReg valueFPR = op1.fpr();
2506     FPRReg scratchFPR = scratch.fpr();
2507     GPRReg resultGPR = result.gpr();
2508
2509     JITCompiler::JumpList failureCases;
2510     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2511     m_jit.branchConvertDoubleToInt32(
2512         valueFPR, resultGPR, failureCases, scratchFPR,
2513         shouldCheckNegativeZero(node->arithMode()));
2514     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2515
2516     int32Result(resultGPR, node);
2517 }
2518
2519 void SpeculativeJIT::compileDoubleRep(Node* node)
2520 {
2521     switch (node->child1().useKind()) {
2522     case RealNumberUse: {
2523         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2524         FPRTemporary result(this);
2525         
2526         JSValueRegs op1Regs = op1.jsValueRegs();
2527         FPRReg resultFPR = result.fpr();
2528         
2529 #if USE(JSVALUE64)
2530         GPRTemporary temp(this);
2531         GPRReg tempGPR = temp.gpr();
2532         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2533 #else
2534         FPRTemporary temp(this);
2535         FPRReg tempFPR = temp.fpr();
2536         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2537 #endif
2538         
2539         JITCompiler::Jump done = m_jit.branchDouble(
2540             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2541         
2542         DFG_TYPE_CHECK(
2543             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2544         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2545         
2546         done.link(&m_jit);
2547         
2548         doubleResult(resultFPR, node);
2549         return;
2550     }
2551     
2552     case NotCellUse:
2553     case NumberUse: {
2554         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2555
2556         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2557         if (isInt32Speculation(possibleTypes)) {
2558             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2559             FPRTemporary result(this);
2560             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2561             doubleResult(result.fpr(), node);
2562             return;
2563         }
2564
2565         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2566         FPRTemporary result(this);
2567
2568 #if USE(JSVALUE64)
2569         GPRTemporary temp(this);
2570
2571         GPRReg op1GPR = op1.gpr();
2572         GPRReg tempGPR = temp.gpr();
2573         FPRReg resultFPR = result.fpr();
2574         JITCompiler::JumpList done;
2575
2576         JITCompiler::Jump isInteger = m_jit.branch64(
2577             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2578
2579         if (node->child1().useKind() == NotCellUse) {
2580             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2581             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2582
2583             static const double zero = 0;
2584             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2585
2586             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2587             done.append(isNull);
2588
2589             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCellCheck,
2590                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2591
2592             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2593             static const double one = 1;
2594             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2595             done.append(m_jit.jump());
2596             done.append(isFalse);
2597
2598             isUndefined.link(&m_jit);
2599             static const double NaN = PNaN;
2600             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2601             done.append(m_jit.jump());
2602
2603             isNumber.link(&m_jit);
2604         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2605             typeCheck(
2606                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2607                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2608         }
2609
2610         unboxDouble(op1GPR, tempGPR, resultFPR);
2611         done.append(m_jit.jump());
2612     
2613         isInteger.link(&m_jit);
2614         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2615         done.link(&m_jit);
2616 #else // USE(JSVALUE64) -> this is the 32_64 case
2617         FPRTemporary temp(this);
2618     
2619         GPRReg op1TagGPR = op1.tagGPR();
2620         GPRReg op1PayloadGPR = op1.payloadGPR();
2621         FPRReg tempFPR = temp.fpr();
2622         FPRReg resultFPR = result.fpr();
2623         JITCompiler::JumpList done;
2624     
2625         JITCompiler::Jump isInteger = m_jit.branch32(
2626             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2627
2628         if (node->child1().useKind() == NotCellUse) {
2629             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2630             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2631
2632             static const double zero = 0;
2633             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2634
2635             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2636             done.append(isNull);
2637
2638             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2639
2640             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2641             static const double one = 1;
2642             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2643             done.append(m_jit.jump());
2644             done.append(isFalse);
2645
2646             isUndefined.link(&m_jit);
2647             static const double NaN = PNaN;
2648             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2649             done.append(m_jit.jump());
2650
2651             isNumber.link(&m_jit);
2652         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2653             typeCheck(
2654                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2655                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2656         }
2657
2658         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2659         done.append(m_jit.jump());
2660     
2661         isInteger.link(&m_jit);
2662         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2663         done.link(&m_jit);
2664 #endif // USE(JSVALUE64)
2665     
2666         doubleResult(resultFPR, node);
2667         return;
2668     }
2669         
2670 #if USE(JSVALUE64)
2671     case Int52RepUse: {
2672         SpeculateStrictInt52Operand value(this, node->child1());
2673         FPRTemporary result(this);
2674         
2675         GPRReg valueGPR = value.gpr();
2676         FPRReg resultFPR = result.fpr();
2677
2678         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2679         
2680         doubleResult(resultFPR, node);
2681         return;
2682     }
2683 #endif // USE(JSVALUE64)
2684         
2685     default:
2686         RELEASE_ASSERT_NOT_REACHED();
2687         return;
2688     }
2689 }
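
// For the NotCellUse path this matches ToNumber on non-cell values:
//
//     undefined -> NaN,  null -> +0,  false -> +0,  true -> 1,  int32/double -> the value itself
//
// Anything that is a cell (string, symbol, object) is a speculation failure here.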
2690
2691 void SpeculativeJIT::compileValueRep(Node* node)
2692 {
2693     switch (node->child1().useKind()) {
2694     case DoubleRepUse: {
2695         SpeculateDoubleOperand value(this, node->child1());
2696         JSValueRegsTemporary result(this);
2697         
2698         FPRReg valueFPR = value.fpr();
2699         JSValueRegs resultRegs = result.regs();
2700         
2701         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2702         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2703         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2704         // local was purified.
2705         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2706             m_jit.purifyNaN(valueFPR);
2707
2708         boxDouble(valueFPR, resultRegs);
2709         
2710         jsValueResult(resultRegs, node);
2711         return;
2712     }
2713         
2714 #if USE(JSVALUE64)
2715     case Int52RepUse: {
2716         SpeculateStrictInt52Operand value(this, node->child1());
2717         GPRTemporary result(this);
2718         
2719         GPRReg valueGPR = value.gpr();
2720         GPRReg resultGPR = result.gpr();
2721         
2722         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2723         
2724         jsValueResult(resultGPR, node);
2725         return;
2726     }
2727 #endif // USE(JSVALUE64)
2728         
2729     default:
2730         RELEASE_ASSERT_NOT_REACHED();
2731         return;
2732     }
2733 }
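
// purifyNaN rewrites any NaN whose bit pattern would collide with the NaN-boxing encoding into
// the one canonical (pure) NaN, so the boxed result is always a well-formed JSValue. It is only
// emitted when the abstract state says an impure NaN might actually reach this node.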
2734
2735 static double clampDoubleToByte(double d)
2736 {
2737     d += 0.5;
2738     if (!(d > 0))
2739         d = 0;
2740     else if (d > 255)
2741         d = 255;
2742     return d;
2743 }
2744
2745 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2746 {
2747     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2748     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2749     jit.xorPtr(result, result);
2750     MacroAssembler::Jump clamped = jit.jump();
2751     tooBig.link(&jit);
2752     jit.move(JITCompiler::TrustedImm32(255), result);
2753     clamped.link(&jit);
2754     inBounds.link(&jit);
2755 }
2756
2757 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2758 {
2759     // Unordered compare so we pick up NaN
2760     static const double zero = 0;
2761     static const double byteMax = 255;
2762     static const double half = 0.5;
2763     jit.loadDouble(JITCompiler::TrustedImmPtr(&zero), scratch);
2764     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2765     jit.loadDouble(JITCompiler::TrustedImmPtr(&byteMax), scratch);
2766     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2767     
2768     jit.loadDouble(JITCompiler::TrustedImmPtr(&half), scratch);
2769     // FIXME: This should probably just use a floating point round!
2770     // https://bugs.webkit.org/show_bug.cgi?id=72054
2771     jit.addDouble(source, scratch);
2772     jit.truncateDoubleToInt32(scratch, result);   
2773     MacroAssembler::Jump truncatedInt = jit.jump();
2774     
2775     tooSmall.link(&jit);
2776     jit.xorPtr(result, result);
2777     MacroAssembler::Jump zeroed = jit.jump();
2778     
2779     tooBig.link(&jit);
2780     jit.move(JITCompiler::TrustedImm32(255), result);
2781     
2782     truncatedInt.link(&jit);
2783     zeroed.link(&jit);
2784
2785 }
2786
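// Returns the jump to take when the typed array access is out of bounds, or an unset Jump when no
// check is needed: PutByValAlias needs no check, and neither does a foldable view with a constant
// in-bounds index. When the view's length is a known constant we compare the index against an
// immediate; otherwise we compare against the view's length loaded from memory.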
2787 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2788 {
2789     if (node->op() == PutByValAlias)
2790         return JITCompiler::Jump();
2791     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2792         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2793     if (view) {
2794         uint32_t length = view->length();
2795         Node* indexNode = m_jit.graph().child(node, 1).node();
2796         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2797             return JITCompiler::Jump();
2798         return m_jit.branch32(
2799             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2800     }
2801     return m_jit.branch32(
2802         MacroAssembler::AboveOrEqual, indexGPR,
2803         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2804 }
2805
2806 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2807 {
2808     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2809     if (!jump.isSet())
2810         return;
2811     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2812 }
2813
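// Used by the put-by-val paths after the store: the returned jump (when set) is the in-bounds
// fall-through that skips the out-of-bounds handling emitted here. For in-bounds array modes an
// out-of-bounds index is simply a speculation failure; for tolerant modes we only OSR exit when
// the view has been neutered, i.e. it is a wasteful typed array whose vector is now null.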
2814 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2815 {
2816     JITCompiler::Jump done;
2817     if (outOfBounds.isSet()) {
2818         done = m_jit.jump();
2819         if (node->arrayMode().isInBounds())
2820             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2821         else {
2822             outOfBounds.link(&m_jit);
2823
2824             JITCompiler::Jump notWasteful = m_jit.branch32(
2825                 MacroAssembler::NotEqual,
2826                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2827                 TrustedImm32(WastefulTypedArray));
2828
2829             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2830                 MacroAssembler::Zero,
2831                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfPoisonedVector()));
2832             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2833             notWasteful.link(&m_jit);
2834         }
2835     }
2836     return done;
2837 }
2838
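// Loads an element of an integer-typed array. When index masking is enabled (a Spectre
// mitigation), the index is first ANDed with the base object's butterfly indexing mask so that a
// mispredicted bounds check cannot read outside the allocation. The load itself is a sized (and,
// for signed types, sign-extending) BaseIndex load keyed off elementSize(type).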
2839 void SpeculativeJIT::loadFromIntTypedArray(GPRReg baseReg, GPRReg storageReg, GPRReg propertyReg, GPRReg resultReg, TypedArrayType type)
2840 {
2841     if (m_indexMaskingMode == IndexMaskingEnabled)
2842         m_jit.and32(MacroAssembler::Address(baseReg, JSObject::butterflyIndexingMaskOffset()), propertyReg);
2843     switch (elementSize(type)) {
2844     case 1:
2845         if (isSigned(type))
2846             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2847         else
2848             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2849         break;
2850     case 2:
2851         if (isSigned(type))
2852             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2853         else
2854             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2855         break;
2856     case 4:
2857         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2858         break;
2859     default:
2860         CRASH();
2861     }
2862 }
2863
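// Boxes the raw loaded value as the node's result. Everything except Uint32 fits in an int32. For
// Uint32 loads a negative int32 bit pattern means the value is >= 2^31, so we either speculate
// that the value is in int32 range, produce an Int52 on 64-bit, or fall back to a double and add
// 2^32 when the sign bit was set.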
2864 void SpeculativeJIT::setIntTypedArrayLoadResult(Node* node, GPRReg resultReg, TypedArrayType type, bool canSpeculate)
2865 {
2866     if (elementSize(type) < 4 || isSigned(type)) {
2867         int32Result(resultReg, node);
2868         return;
2869     }
2870     
2871     ASSERT(elementSize(type) == 4 && !isSigned(type));
2872     if (node->shouldSpeculateInt32() && canSpeculate) {
2873         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2874         int32Result(resultReg, node);
2875         return;
2876     }
2877     
2878 #if USE(JSVALUE64)
2879     if (node->shouldSpeculateAnyInt()) {
2880         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2881         strictInt52Result(resultReg, node);
2882         return;
2883     }
2884 #endif
2885     
2886     FPRTemporary fresult(this);
2887     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2888     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2889     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2890     positive.link(&m_jit);
2891     doubleResult(fresult.fpr(), node);
2892 }
2893
2894 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2895 {
2896     ASSERT(isInt(type));
2897     
2898     SpeculateCellOperand base(this, m_graph.varArgChild(node, 0));
2899     SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
2900     StorageOperand storage(this, m_graph.varArgChild(node, 2));
2901
2902     GPRReg baseReg = base.gpr();
2903     GPRReg propertyReg = property.gpr();
2904     GPRReg storageReg = storage.gpr();
2905
2906     GPRTemporary result(this);
2907     GPRReg resultReg = result.gpr();
2908
2909     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(m_graph.varArgChild(node, 0))));
2910
2911     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2912     loadFromIntTypedArray(baseReg, storageReg, propertyReg, resultReg, type);
2913     bool canSpeculate = true;
2914     setIntTypedArrayLoadResult(node, resultReg, type, canSpeculate);
2915 }
2916
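// Materializes the value operand of an integer-typed-array store into |value|, clamping to a byte
// when requested. A constant whose speculated type fits the edge's use kind is folded to an int32
// immediate (after clamping, if requested); if such a constant is not a number, speculative
// execution is terminated and false is returned. For DoubleRepUse without clamping, values that
// don't truncate cleanly to int32 are boxed and routed to |slowPathCases|.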
2917 bool SpeculativeJIT::getIntTypedArrayStoreOperand(
2918     GPRTemporary& value,
2919     GPRReg property,
2920 #if USE(JSVALUE32_64)
2921     GPRTemporary& propertyTag,
2922     GPRTemporary& valueTag,
2923 #endif
2924     Edge valueUse, JITCompiler::JumpList& slowPathCases, bool isClamped)
2925 {
2926     bool isAppropriateConstant = false;
2927     if (valueUse->isConstant()) {
2928         JSValue jsValue = valueUse->asJSValue();
2929         SpeculatedType expectedType = typeFilterFor(valueUse.useKind());
2930         SpeculatedType actualType = speculationFromValue(jsValue);
2931         isAppropriateConstant = (expectedType | actualType) == expectedType;
2932     }
2933     
2934     if (isAppropriateConstant) {
2935         JSValue jsValue = valueUse->asJSValue();
2936         if (!jsValue.isNumber()) {
2937             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2938             return false;
2939         }
2940         double d = jsValue.asNumber();
2941         if (isClamped)
2942             d = clampDoubleToByte(d);
2943         GPRTemporary scratch(this);
2944         GPRReg scratchReg = scratch.gpr();
2945         m_jit.move(Imm32(toInt32(d)), scratchReg);
2946         value.adopt(scratch);
2947     } else {
2948         switch (valueUse.useKind()) {
2949         case Int32Use: {
2950             SpeculateInt32Operand valueOp(this, valueUse);
2951             GPRTemporary scratch(this);
2952             GPRReg scratchReg = scratch.gpr();
2953             m_jit.move(valueOp.gpr(), scratchReg);
2954             if (isClamped)
2955                 compileClampIntegerToByte(m_jit, scratchReg);
2956             value.adopt(scratch);
2957             break;
2958         }
2959             
2960 #if USE(JSVALUE64)
2961         case Int52RepUse: {
2962             SpeculateStrictInt52Operand valueOp(this, valueUse);
2963             GPRTemporary scratch(this);
2964             GPRReg scratchReg = scratch.gpr();
2965             m_jit.move(valueOp.gpr(), scratchReg);
2966             if (isClamped) {
2967                 MacroAssembler::Jump inBounds = m_jit.branch64(
2968                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2969                 MacroAssembler::Jump tooBig = m_jit.branch64(
2970                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2971                 m_jit.move(TrustedImm32(0), scratchReg);
2972                 MacroAssembler::Jump clamped = m_jit.jump();
2973                 tooBig.link(&m_jit);
2974                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2975                 clamped.link(&m_jit);
2976                 inBounds.link(&m_jit);
2977             }
2978             value.adopt(scratch);
2979             break;
2980         }
2981 #endif // USE(JSVALUE64)
2982             
2983         case DoubleRepUse: {
2984             RELEASE_ASSERT(!isAtomicsIntrinsic(m_currentNode->op()));
2985             if (isClamped) {
2986                 SpeculateDoubleOperand valueOp(this, valueUse);
2987                 GPRTemporary result(this);
2988                 FPRTemporary floatScratch(this);
2989                 FPRReg fpr = valueOp.fpr();
2990                 GPRReg gpr = result.gpr();
2991                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2992                 value.adopt(result);
2993             } else {
2994 #if USE(JSVALUE32_64)
2995                 GPRTemporary realPropertyTag(this);
2996                 propertyTag.adopt(realPropertyTag);
2997                 GPRReg propertyTagGPR = propertyTag.gpr();
2998
2999                 GPRTemporary realValueTag(this);
3000                 valueTag.adopt(realValueTag);
3001                 GPRReg valueTagGPR = valueTag.gpr();
3002 #endif
3003                 SpeculateDoubleOperand valueOp(this, valueUse);
3004                 GPRTemporary result(this);
3005                 FPRReg fpr = valueOp.fpr();
3006                 GPRReg gpr = result.gpr();
3007                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
3008                 m_jit.xorPtr(gpr, gpr);
3009                 MacroAssembler::JumpList fixed(m_jit.jump());
3010                 notNaN.link(&m_jit);
3011
3012                 fixed.append(m_jit.branchTruncateDoubleToInt32(
3013                     fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
3014
3015 #if USE(JSVALUE64)
3016                 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
3017                 boxDouble(fpr, gpr);
3018 #else
3019                 UNUSED_PARAM(property);
3020                 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
3021                 boxDouble(fpr, valueTagGPR, gpr);
3022 #endif
3023                 slowPathCases.append(m_jit.jump());
3024
3025                 fixed.link(&m_jit);
3026                 value.adopt(result);
3027             }
3028             break;
3029         }
3030             
3031         default:
3032             RELEASE_ASSERT_NOT_REACHED();
3033             break;
3034         }
3035     }
3036     return true;
3037 }
3038
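// Emits the store for put-by-val on an integer-typed array. An out-of-bounds index branches over
// the store and is handled by jumpForTypedArrayIsNeuteredIfOutOfBounds; slow-path cases produced
// while materializing the value (e.g. an untruncatable double) fall back to the generic
// operationPutByVal* functions.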
3039 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3040 {
3041     ASSERT(isInt(type));
3042     
3043     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3044     GPRReg storageReg = storage.gpr();
3045     
3046     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3047     
3048     GPRTemporary value;
3049 #if USE(JSVALUE32_64)
3050     GPRTemporary propertyTag;
3051     GPRTemporary valueTag;
3052 #endif
3053
3054     JITCompiler::JumpList slowPathCases;
3055     
3056     bool result = getIntTypedArrayStoreOperand(
3057         value, property,
3058 #if USE(JSVALUE32_64)
3059         propertyTag, valueTag,
3060 #endif
3061         valueUse, slowPathCases, isClamped(type));
3062     if (!result) {
3063         noResult(node);
3064         return;
3065     }
3066
3067     GPRReg valueGPR = value.gpr();
3068 #if USE(JSVALUE32_64)
3069     GPRReg propertyTagGPR = propertyTag.gpr();
3070     GPRReg valueTagGPR = valueTag.gpr();
3071 #endif
3072
3073     ASSERT_UNUSED(valueGPR, valueGPR != property);
3074     ASSERT(valueGPR != base);
3075     ASSERT(valueGPR != storageReg);
3076     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3077
3078     switch (elementSize(type)) {
3079     case 1:
3080         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
3081         break;
3082     case 2:
3083         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
3084         break;
3085     case 4:
3086         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3087         break;
3088     default:
3089         CRASH();
3090     }
3091
3092     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3093     if (done.isSet())
3094         done.link(&m_jit);
3095
3096     if (!slowPathCases.empty()) {
3097 #if USE(JSVALUE64)
3098         if (node->op() == PutByValDirect) {
3099             addSlowPathGenerator(slowPathCall(
3100                 slowPathCases, this,
3101                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
3102                 NoResult, base, property, valueGPR));
3103         } else {
3104             addSlowPathGenerator(slowPathCall(
3105                 slowPathCases, this,
3106                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
3107                 NoResult, base, property, valueGPR));
3108         }
3109 #else // not USE(JSVALUE64)
3110         if (node->op() == PutByValDirect) {
3111             addSlowPathGenerator(slowPathCall(
3112                 slowPathCases, this,
3113                 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
3114                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3115         } else {
3116             addSlowPathGenerator(slowPathCall(
3117                 slowPathCases, this,
3118                 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
3119                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3120         }
3121 #endif
3122     }
3123     
3124     noResult(node);
3125 }
3126
3127 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3128 {
3129     ASSERT(isFloat(type));
3130     
3131     SpeculateCellOperand base(this, m_graph.varArgChild(node, 0));
3132     SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
3133     StorageOperand storage(this, m_graph.varArgChild(node, 2));
3134
3135     GPRReg baseReg = base.gpr();
3136     GPRReg propertyReg = property.gpr();
3137     GPRReg storageReg = storage.gpr();
3138
3139     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(m_graph.varArgChild(node, 0))));
3140
3141     FPRTemporary result(this);
3142     FPRReg resultReg = result.fpr();
3143     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3144     if (m_indexMaskingMode == IndexMaskingEnabled)
3145         m_jit.and32(MacroAssembler::Address(baseReg, JSObject::butterflyIndexingMaskOffset()), propertyReg);
3146     switch (elementSize(type)) {
3147     case 4:
3148         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3149         m_jit.convertFloatToDouble(resultReg, resultReg);
3150         break;
3151     case 8: {
3152         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3153         break;
3154     }
3155     default:
3156         RELEASE_ASSERT_NOT_REACHED();
3157     }
3158     
3159     doubleResult(resultReg, node);
3160 }
3161
3162 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3163 {
3164     ASSERT(isFloat(type));
3165     
3166     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3167     GPRReg storageReg = storage.gpr();
3168     
3169     Edge baseUse = m_jit.graph().varArgChild(node, 0);
3170     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3171
3172     SpeculateDoubleOperand valueOp(this, valueUse);
3173     FPRTemporary scratch(this);
3174     FPRReg valueFPR = valueOp.fpr();
3175     FPRReg scratchFPR = scratch.fpr();
3176
3177     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3178     
3179     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3180     
3181     switch (elementSize(type)) {
3182     case 4: {
3183         m_jit.moveDouble(valueFPR, scratchFPR);
3184         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3185         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3186         break;
3187     }
3188     case 8:
3189         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3190         break;
3191     default:
3192         RELEASE_ASSERT_NOT_REACHED();
3193     }
3194
3195     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3196     if (done.isSet())
3197         done.link(&m_jit);
3198     noResult(node);
3199 }
3200
3201 void SpeculativeJIT::compileGetByValForObjectWithString(Node* node)
3202 {
3203     SpeculateCellOperand arg1(this, m_graph.varArgChild(node, 0));
3204     SpeculateCellOperand arg2(this, m_graph.varArgChild(node, 1));
3205
3206     GPRReg arg1GPR = arg1.gpr();
3207     GPRReg arg2GPR = arg2.gpr();
3208
3209     speculateObject(m_graph.varArgChild(node, 0), arg1GPR);
3210     speculateString(m_graph.varArgChild(node, 1), arg2GPR);
3211
3212     flushRegisters();
3213     JSValueRegsFlushedCallResult result(this);
3214     JSValueRegs resultRegs = result.regs();
3215     callOperation(operationGetByValObjectString, resultRegs, arg1GPR, arg2GPR);
3216     m_jit.exceptionCheck();
3217
3218     jsValueResult(resultRegs, node);
3219 }
3220
3221 void SpeculativeJIT::compileGetByValForObjectWithSymbol(Node* node)
3222 {
3223     SpeculateCellOperand arg1(this, m_graph.varArgChild(node, 0));
3224     SpeculateCellOperand arg2(this, m_graph.varArgChild(node, 1));
3225
3226     GPRReg arg1GPR = arg1.gpr();
3227     GPRReg arg2GPR = arg2.gpr();
3228
3229     speculateObject(m_graph.varArgChild(node, 0), arg1GPR);
3230     speculateSymbol(m_graph.varArgChild(node, 1), arg2GPR);
3231
3232     flushRegisters();
3233     JSValueRegsFlushedCallResult result(this);
3234     JSValueRegs resultRegs = result.regs();
3235     callOperation(operationGetByValObjectSymbol, resultRegs, arg1GPR, arg2GPR);
3236     m_jit.exceptionCheck();
3237
3238     jsValueResult(resultRegs, node);
3239 }
3240
3241 void SpeculativeJIT::compilePutByValForCellWithString(Node* node, Edge& child1, Edge& child2, Edge& child3)
3242 {
3243     SpeculateCellOperand arg1(this, child1);
3244     SpeculateCellOperand arg2(this, child2);
3245     JSValueOperand arg3(this, child3);
3246
3247     GPRReg arg1GPR = arg1.gpr();
3248     GPRReg arg2GPR = arg2.gpr();
3249     JSValueRegs arg3Regs = arg3.jsValueRegs();
3250
3251     speculateString(child2, arg2GPR);
3252
3253     flushRegisters();
3254     callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValCellStringStrict : operationPutByValCellStringNonStrict, arg1GPR, arg2GPR, arg3Regs);
3255     m_jit.exceptionCheck();
3256
3257     noResult(node);
3258 }
3259
3260 void SpeculativeJIT::compilePutByValForCellWithSymbol(Node* node, Edge& child1, Edge& child2, Edge& child3)
3261 {
3262     SpeculateCellOperand arg1(this, child1);
3263     SpeculateCellOperand arg2(this, child2);
3264     JSValueOperand arg3(this, child3);
3265
3266     GPRReg arg1GPR = arg1.gpr();
3267     GPRReg arg2GPR = arg2.gpr();
3268     JSValueRegs arg3Regs = arg3.jsValueRegs();
3269
3270     speculateSymbol(child2, arg2GPR);
3271
3272     flushRegisters();
3273     callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValCellSymbolStrict : operationPutByValCellSymbolNonStrict, arg1GPR, arg2GPR, arg3Regs);
3274     m_jit.exceptionCheck();
3275
3276     noResult(node);
3277 }
3278
3279 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg, GPRReg scratch3Reg)
3280 {
3281     // Check that prototype is an object.
3282     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3283     
3284     // Initialize scratchReg with the value being checked.
3285     m_jit.move(valueReg, scratchReg);
3286     
3287     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3288     MacroAssembler::Label loop(&m_jit);
3289     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3290         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3291     m_jit.emitLoadStructure(*m_jit.vm(), scratchReg, scratch3Reg, scratch2Reg);
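    // Load the prototype out of the Structure. An empty prototype slot means the object uses poly
    // proto, in which case the prototype is stored in the object itself at the known poly-proto
    // offset.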
3292 #if USE(JSVALUE64)
3293     m_jit.load64(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset()), scratch3Reg);
3294     auto hasMonoProto = m_jit.branchTest64(JITCompiler::NonZero, scratch3Reg);
3295     m_jit.load64(JITCompiler::Address(scratchReg, offsetRelativeToBase(knownPolyProtoOffset)), scratch3Reg);
3296     hasMonoProto.link(&m_jit);
3297     m_jit.move(scratch3Reg, scratchReg);
3298 #else
3299     m_jit.load32(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset() + TagOffset), scratch2Reg);
3300     m_jit.load32(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset() + PayloadOffset), scratch3Reg);
3301     auto hasMonoProto = m_jit.branch32(CCallHelpers::NotEqual, scratch2Reg, TrustedImm32(JSValue::EmptyValueTag));
3302     m_jit.load32(JITCompiler::Address(scratchReg, offsetRelativeToBase(knownPolyProtoOffset) + PayloadOffset), scratch3Reg);
3303     hasMonoProto.link(&m_jit);
3304     m_jit.move(scratch3Reg, scratchReg);
3305 #endif
3306
3307     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3308 #if USE(JSVALUE64)
3309     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3310 #else
3311     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3312 #endif
3313     
3314     // No match - result is false.
3315 #if USE(JSVALUE64)
3316     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
3317 #else
3318     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
3319 #endif
3320     MacroAssembler::JumpList doneJumps; 
3321     doneJumps.append(m_jit.jump());
3322
3323     performDefaultHasInstance.link(&m_jit);
3324     silentSpillAllRegisters(scratchReg);
3325     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
3326     silentFillAllRegisters();
3327     m_jit.exceptionCheck();
3328 #if USE(JSVALUE64)
3329     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
3330 #endif
3331     doneJumps.append(m_jit.jump());
3332     
3333     isInstance.link(&m_jit);
3334 #if USE(JSVALUE64)
3335     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
3336 #else
3337     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
3338 #endif
3339     
3340     doneJumps.link(&m_jit);
3341 }
3342
3343 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
3344 {
3345     SpeculateCellOperand base(this, node->child1());
3346
3347     GPRReg baseGPR = base.gpr();
3348
3349     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
3350
3351     noResult(node);
3352 }
3353
3354 void SpeculativeJIT::compileParseInt(Node* node)
3355 {
3356     RELEASE_ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == StringUse);
3357     if (node->child2()) {
3358         SpeculateInt32Operand radix(this, node->child2());
3359         GPRReg radixGPR = radix.gpr();
3360         if (node->child1().useKind() == UntypedUse) {
3361             JSValueOperand value(this, node->child1());
3362             JSValueRegs valueRegs = value.jsValueRegs();
3363
3364             flushRegisters();
3365             JSValueRegsFlushedCallResult result(this);
3366             JSValueRegs resultRegs = result.regs();
3367             callOperation(operationParseIntGeneric, resultRegs, valueRegs, radixGPR);
3368             m_jit.exceptionCheck();
3369             jsValueResult(resultRegs, node);
3370             return;
3371         }
3372
3373         SpeculateCellOperand value(this, node->child1());
3374         GPRReg valueGPR = value.gpr();
3375         speculateString(node->child1(), valueGPR);
3376
3377         flushRegisters();
3378         JSValueRegsFlushedCallResult result(this);
3379         JSValueRegs resultRegs = result.regs();
3380         callOperation(operationParseIntString, resultRegs, valueGPR, radixGPR);
3381         m_jit.exceptionCheck();
3382         jsValueResult(resultRegs, node);
3383         return;
3384     }
3385
3386     if (node->child1().useKind() == UntypedUse) {
3387         JSValueOperand value(this, node->child1());
3388         JSValueRegs valueRegs = value.jsValueRegs();
3389
3390         flushRegisters();
3391         JSValueRegsFlushedCallResult result(this);
3392         JSValueRegs resultRegs = result.regs();
3393         callOperation(operationParseIntNoRadixGeneric, resultRegs, valueRegs);
3394         m_jit.exceptionCheck();
3395         jsValueResult(resultRegs, node);
3396         return;
3397     }
3398
3399     SpeculateCellOperand value(this, node->child1());
3400     GPRReg valueGPR = value.gpr();
3401     speculateString(node->child1(), valueGPR);
3402
3403     flushRegisters();
3404     JSValueRegsFlushedCallResult result(this);
3405     JSValueRegs resultRegs = result.regs();
3406     callOperation(operationParseIntStringNoRadix, resultRegs, valueGPR);
3407     m_jit.exceptionCheck();
3408     jsValueResult(resultRegs, node);
3409 }
3410
3411 void SpeculativeJIT::compileInstanceOf(Node* node)
3412 {
3413     if (node->child1().useKind() == UntypedUse) {
3414         // It might not be a cell. Speculate less aggressively.
3415         // Or: it might only be used once (i.e. by us), so we get zero benefit
3416         // from speculating any more aggressively than we absolutely need to.
3417         
3418         JSValueOperand value(this, node->child1());
3419         SpeculateCellOperand prototype(this, node->child2());
3420         GPRTemporary scratch(this);
3421         GPRTemporary scratch2(this);
3422         GPRTemporary scratch3(this);
3423         
3424         GPRReg prototypeReg = prototype.gpr();
3425         GPRReg scratchReg = scratch.gpr();
3426         GPRReg scratch2Reg = scratch2.gpr();
3427         GPRReg scratch3Reg = scratch3.gpr();
3428         
3429         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
3430         GPRReg valueReg = value.jsValueRegs().payloadGPR();
3431         moveFalseTo(scratchReg);
3432
3433         MacroAssembler::Jump done = m_jit.jump();
3434         
3435         isCell.link(&m_jit);
3436         
3437         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg, scratch3Reg);
3438         
3439         done.link(&m_jit);
3440
3441         blessedBooleanResult(scratchReg, node);
3442         return;
3443     }
3444     
3445     SpeculateCellOperand value(this, node->child1());
3446     SpeculateCellOperand prototype(this, node->child2());
3447     
3448     GPRTemporary scratch(this);
3449     GPRTemporary scratch2(this);
3450     GPRTemporary scratch3(this);
3451     
3452     GPRReg valueReg = value.gpr();
3453     GPRReg prototypeReg = prototype.gpr();
3454     GPRReg scratchReg = scratch.gpr();
3455     GPRReg scratch2Reg = scratch2.gpr();
3456     GPRReg scratch3Reg = scratch3.gpr();
3457     
3458     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg, scratch3Reg);
3459
3460     blessedBooleanResult(scratchReg, node);
3461 }
3462
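// Shared path for bit ops whose operands are UntypedUse (BitAnd/BitOr/BitXor and BitLShift). If
// either operand is known not to be a number we just call the slow-path operation. Otherwise we
// emit the snippet generator's inline fast path and call the same operation from its slow-path
// jump list. The snippet generator cannot handle two constant operands, so at most one side is
// folded to an immediate.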
3463 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3464 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3465 {
3466     Edge& leftChild = node->child1();
3467     Edge& rightChild = node->child2();
3468
3469     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3470         JSValueOperand left(this, leftChild);
3471         JSValueOperand right(this, rightChild);
3472         JSValueRegs leftRegs = left.jsValueRegs();
3473         JSValueRegs rightRegs = right.jsValueRegs();
3474
3475         flushRegisters();
3476         JSValueRegsFlushedCallResult result(this);
3477         JSValueRegs resultRegs = result.regs();
3478         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3479         m_jit.exceptionCheck();
3480
3481         jsValueResult(resultRegs, node);
3482         return;
3483     }
3484
3485     std::optional<JSValueOperand> left;
3486     std::optional<JSValueOperand> right;
3487
3488     JSValueRegs leftRegs;
3489     JSValueRegs rightRegs;
3490
3491 #if USE(JSVALUE64)
3492     GPRTemporary result(this);
3493     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3494     GPRTemporary scratch(this);
3495     GPRReg scratchGPR = scratch.gpr();
3496 #else
3497     GPRTemporary resultTag(this);
3498     GPRTemporary resultPayload(this);
3499     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3500     GPRReg scratchGPR = resultTag.gpr();
3501 #endif
3502
3503     SnippetOperand leftOperand;
3504     SnippetOperand rightOperand;
3505
3506     // The snippet generator does not support both operands being constant. If the left
3507     // operand is already const, we'll ignore the right operand's constness.
3508     if (leftChild->isInt32Constant())
3509         leftOperand.setConstInt32(leftChild->asInt32());
3510     else if (rightChild->isInt32Constant())
3511         rightOperand.setConstInt32(rightChild->asInt32());
3512
3513     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3514
3515     if (!leftOperand.isConst()) {
3516         left.emplace(this, leftChild);
3517         leftRegs = left->jsValueRegs();
3518     }
3519     if (!rightOperand.isConst()) {
3520         right.emplace(this, rightChild);
3521         rightRegs = right->jsValueRegs();
3522     }
3523
3524     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3525     gen.generateFastPath(m_jit);
3526
3527     ASSERT(gen.didEmitFastPath());
3528     gen.endJumpList().append(m_jit.jump());
3529
3530     gen.slowPathJumpList().link(&m_jit);
3531     silentSpillAllRegisters(resultRegs);
3532
3533     if (leftOperand.isConst()) {
3534         leftRegs = resultRegs;
3535         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3536     } else if (rightOperand.isConst()) {
3537         rightRegs = resultRegs;
3538         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3539     }
3540
3541     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3542
3543     silentFillAllRegisters();
3544     m_jit.exceptionCheck();
3545
3546     gen.endJumpList().link(&m_jit);
3547     jsValueResult(resultRegs, node);
3548 }
3549
3550 void SpeculativeJIT::compileBitwiseOp(Node* node)
3551 {
3552     NodeType op = node->op();
3553     Edge& leftChild = node->child1();
3554     Edge& rightChild = node->child2();
3555
3556     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3557         switch (op) {
3558         case BitAnd:
3559             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3560             return;
3561         case BitOr:
3562             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3563             return;
3564         case BitXor:
3565             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3566             return;
3567         default:
3568             RELEASE_ASSERT_NOT_REACHED();
3569         }
3570     }
3571
3572     if (leftChild->isInt32Constant()) {
3573         SpeculateInt32Operand op2(this, rightChild);
3574         GPRTemporary result(this, Reuse, op2);
3575
3576         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3577
3578         int32Result(result.gpr(), node);
3579
3580     } else if (rightChild->isInt32Constant()) {
3581         SpeculateInt32Operand op1(this, leftChild);
3582         GPRTemporary result(this, Reuse, op1);
3583
3584         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3585
3586         int32Result(result.gpr(), node);
3587
3588     } else {
3589         SpeculateInt32Operand op1(this, leftChild);
3590         SpeculateInt32Operand op2(this, rightChild);
3591         GPRTemporary result(this, Reuse, op1, op2);
3592         
3593         GPRReg reg1 = op1.gpr();
3594         GPRReg reg2 = op2.gpr();
3595         bitOp(op, reg1, reg2, result.gpr());
3596         
3597         int32Result(result.gpr(), node);
3598     }
3599 }
3600
3601 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3602 {
3603     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3604         ? operationValueBitRShift : operationValueBitURShift;
3605     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3606         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3607
3608     Edge& leftChild = node->child1();
3609     Edge& rightChild = node->child2();
3610
3611     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3612         JSValueOperand left(this, leftChild);
3613         JSValueOperand right(this, rightChild);
3614         JSValueRegs leftRegs = left.jsValueRegs();
3615         JSValueRegs rightRegs = right.jsValueRegs();
3616
3617         flushRegisters();
3618         JSValueRegsFlushedCallResult result(this);
3619         JSValueRegs resultRegs = result.regs();
3620         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3621         m_jit.exceptionCheck();
3622
3623         jsValueResult(resultRegs, node);
3624         return;
3625     }
3626
3627     std::optional<JSValueOperand> left;
3628     std::optional<JSValueOperand> right;
3629
3630     JSValueRegs leftRegs;
3631     JSValueRegs rightRegs;
3632
3633     FPRTemporary leftNumber(this);
3634     FPRReg leftFPR = leftNumber.fpr();
3635
3636 #if USE(JSVALUE64)
3637     GPRTemporary result(this);
3638     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3639     GPRTemporary scratch(this);
3640     GPRReg scratchGPR = scratch.gpr();
3641     FPRReg scratchFPR = InvalidFPRReg;
3642 #else
3643     GPRTemporary resultTag(this);
3644     GPRTemporary resultPayload(this);
3645     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3646     GPRReg scratchGPR = resultTag.gpr();
3647     FPRTemporary fprScratch(this);
3648     FPRReg scratchFPR = fprScratch.fpr();
3649 #endif
3650
3651     SnippetOperand leftOperand;
3652     SnippetOperand rightOperand;
3653
3654     // The snippet generator does not support both operands being constant. If the left
3655     // operand is already const, we'll ignore the right operand's constness.
3656     if (leftChild->isInt32Constant())
3657         leftOperand.setConstInt32(leftChild->asInt32());
3658     else if (rightChild->isInt32Constant())
3659         rightOperand.setConstInt32(rightChild->asInt32());
3660
3661     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3662
3663     if (!leftOperand.isConst()) {
3664         left.emplace(this, leftChild);
3665         leftRegs = left->jsValueRegs();
3666     }
3667     if (!rightOperand.isConst()) {
3668         right.emplace(this, rightChild);
3669         rightRegs = right->jsValueRegs();
3670     }
3671
3672     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3673         leftFPR, scratchGPR, scratchFPR, shiftType);
3674     gen.generateFastPath(m_jit);
3675
3676     ASSERT(gen.didEmitFastPath());
3677     gen.endJumpList().append(m_jit.jump());
3678
3679     gen.slowPathJumpList().link(&m_jit);
3680     silentSpillAllRegisters(resultRegs);
3681
3682     if (leftOperand.isConst()) {
3683         leftRegs = resultRegs;
3684         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3685     } else if (rightOperand.isConst()) {
3686         rightRegs = resultRegs;
3687         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3688     }
3689
3690     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3691
3692     silentFillAllRegisters();
3693     m_jit.exceptionCheck();
3694
3695     gen.endJumpList().link(&m_jit);
3696     jsValueResult(resultRegs, node);
3698 }
3699
3700 void SpeculativeJIT::compileShiftOp(Node* node)
3701 {
3702     NodeType op = node->op();
3703     Edge& leftChild = node->child1();
3704     Edge& rightChild = node->child2();
3705
3706     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3707         switch (op) {
3708         case BitLShift:
3709             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3710             return;
3711         case BitRShift:
3712         case BitURShift:
3713             emitUntypedRightShiftBitOp(node);
3714             return;
3715         default:
3716             RELEASE_ASSERT_NOT_REACHED();
3717         }
3718     }
3719
3720     if (rightChild->isInt32Constant()) {
3721         SpeculateInt32Operand op1(this, leftChild);
3722         GPRTemporary result(this, Reuse, op1);
3723
3724         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3725
3726         int32Result(result.gpr(), node);
3727     } else {
3728         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3729         SpeculateInt32Operand op1(this, leftChild);
3730         SpeculateInt32Operand op2(this, rightChild);
3731         GPRTemporary result(this, Reuse, op1);
3732
3733         GPRReg reg1 = op1.gpr();
3734         GPRReg reg2 = op2.gpr();
3735         shiftOp(op, reg1, reg2, result.gpr());
3736
3737         int32Result(result.gpr(), node);
3738     }
3739 }
3740
3741 void SpeculativeJIT::compileValueAdd(Node* node)
3742 {
3743     Edge& leftChild = node->child1();
3744     Edge& rightChild = node->child2();
3745
3746     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3747         JSValueOperand left(this, leftChild);
3748         JSValueOperand right(this, rightChild);
3749         JSValueRegs leftRegs = left.jsValueRegs();
3750         JSValueRegs rightRegs = right.jsValueRegs();
3751
3752         flushRegisters();
3753         JSValueRegsFlushedCallResult result(this);
3754         JSValueRegs resultRegs = result.regs();
3755         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3756         m_jit.exceptionCheck();
3757     
3758         jsValueResult(resultRegs, node);
3759         return;
3760     }
3761
3762 #if USE(JSVALUE64)
3763     bool needsScratchGPRReg = true;
3764     bool needsScratchFPRReg = false;
3765 #else
3766     bool needsScratchGPRReg = true;
3767     bool needsScratchFPRReg = true;
3768 #endif
3769
3770     CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
3771     ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
3772     Instruction* instruction = &baselineCodeBlock->instructions()[node->origin.semantic.bytecodeIndex];
3773     JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile, instruction);
3774     auto repatchingFunction = operationValueAddOptimize;
3775     auto nonRepatchingFunction = operationValueAdd;
3776     
3777     compileMathIC(node, addIC, needsScratchGPRReg, needsScratchFPRReg, repatchingFunction, nonRepatchingFunction);
3778 }
3779
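// Drives a binary math IC (e.g. the JITAddIC created in compileValueAdd): generate the IC's inline
// fast path if possible, and register a slow-path generator that links the IC's slow-path jumps,
// spills live registers, and calls either the repatching or the non-repatching operation depending
// on whether the IC asked to be repatched. The inline code is finalized at link time.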
3780 template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
3781 void SpeculativeJIT::compileMathIC(Node* node, JITBinaryMathIC<Generator>* mathIC, bool needsScratchGPRReg, bool needsScratchFPRReg, RepatchingFunction repatchingFunction, NonRepatchingFunction nonRepatchingFunction)
3782 {
3783     Edge& leftChild = node->child1();
3784     Edge& rightChild = node->child2();
3785
3786     std::optional<JSValueOperand> left;
3787     std::optional<JSValueOperand> right;
3788
3789     JSValueRegs leftRegs;
3790     JSValueRegs rightRegs;
3791
3792     FPRTemporary leftNumber(this);
3793     FPRTemporary rightNumber(this);
3794     FPRReg leftFPR = leftNumber.fpr();
3795     FPRReg rightFPR = rightNumber.fpr();
3796
3797     GPRReg scratchGPR = InvalidGPRReg;
3798     FPRReg scratchFPR = InvalidFPRReg;
3799
3800     std::optional<FPRTemporary> fprScratch;
3801     if (needsScratchFPRReg) {
3802         fprScratch.emplace(this);
3803         scratchFPR = fprScratch->fpr();
3804     }
3805
3806 #if USE(JSVALUE64)
3807     std::optional<GPRTemporary> gprScratch;
3808     if (needsScratchGPRReg) {
3809         gprScratch.emplace(this);
3810         scratchGPR = gprScratch->gpr();
3811     }
3812     GPRTemporary result(this);
3813     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3814 #else
3815     GPRTemporary resultTag(this);
3816     GPRTemporary resultPayload(this);
3817     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3818     if (needsScratchGPRReg)
3819         scratchGPR = resultRegs.tagGPR();
3820 #endif
3821
3822     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3823     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3824
3825     // The snippet generator does not support both operands being constant. If the left
3826     // operand is already const, we'll ignore the right operand's constness.
3827     if (leftChild->isInt32Constant())
3828         leftOperand.setConstInt32(leftChild->asInt32());
3829     else if (rightChild->isInt32Constant())
3830         rightOperand.setConstInt32(rightChild->asInt32());
3831
3832     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3833     ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));
3834
3835     if (!Generator::isLeftOperandValidConstant(leftOperand)) {
3836         left.emplace(this, leftChild);
3837         leftRegs = left->jsValueRegs();
3838     }
3839     if (!Generator::isRightOperandValidConstant(rightOperand)) {
3840         right.emplace(this, rightChild);
3841         rightRegs = right->jsValueRegs();
3842     }
3843
3844 #if ENABLE(MATH_IC_STATS)
3845     auto inlineStart = m_jit.label();
3846 #endif
3847
3848     Box<MathICGenerationState> addICGenerationState = Box<MathICGenerationState>::create();
3849     mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, leftFPR, rightFPR, scratchGPR, scratchFPR);
3850
3851     bool shouldEmitProfiling = false;
3852     bool generatedInline = mathIC->generateInline(m_jit, *addICGenerationState, shouldEmitProfiling);
3853
3854     if (generatedInline) {
3855         ASSERT(!addICGenerationState->slowPathJumps.empty());
3856
3857         Vector<SilentRegisterSavePlan> savePlans;
3858         silentSpillAllRegistersImpl(false, savePlans, resultRegs);
3859
3860         auto done = m_jit.label();
3861
3862         addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
3863             addICGenerationState->slowPathJumps.link(&m_jit);
3864             addICGenerationState->slowPathStart = m_jit.label();
3865 #if ENABLE(MATH_IC_STATS)
3866             auto slowPathStart = m_jit.label();
3867 #endif
3868
3869             silentSpill(savePlans);
3870
3871             auto innerLeftRegs = leftRegs;
3872             auto innerRightRegs = rightRegs;
3873             if (Generator::isLeftOperandValidConstant(leftOperand)) {
3874                 innerLeftRegs = resultRegs;
3875                 m_jit.moveValue(leftChild->asJSValue(), innerLeftRegs);
3876             } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3877                 innerRightRegs = resultRegs;
3878                 m_jit.moveValue(rightChild->asJSValue(), innerRightRegs);
3879             }
3880
3881             if (addICGenerationState->shouldSlowPathRepatch)
3882                 addICGenerationState->slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJMic>(repatchingFunction), resultRegs, innerLeftRegs, innerRightRegs, TrustedImmPtr(mathIC));
3883             else
3884                 addICGenerationState->slowPathCall = callOperation(nonRepatchingFunction, resultRegs, innerLeftRegs, innerRightRegs);
3885
3886             silentFill(savePlans);
3887             m_jit.exceptionCheck();
3888             m_jit.jump().linkTo(done, &m_jit);
3889
3890             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3891                 mathIC->finalizeInlineCode(*addICGenerationState, linkBuffer);
3892             });
3893