Unreviewed, fix simple goof that was causing 32-bit DFG crashes.
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGCallCreateDirectArgumentsWithKnownLengthSlowPathGenerator.h"
37 #include "DFGCapabilities.h"
38 #include "DFGMayExit.h"
39 #include "DFGOSRExitFuzz.h"
40 #include "DFGSaneStringGetByValSlowPathGenerator.h"
41 #include "DFGSlowPathGenerator.h"
42 #include "DFGSnippetParams.h"
43 #include "DirectArguments.h"
44 #include "JITAddGenerator.h"
45 #include "JITBitAndGenerator.h"
46 #include "JITBitOrGenerator.h"
47 #include "JITBitXorGenerator.h"
48 #include "JITDivGenerator.h"
49 #include "JITLeftShiftGenerator.h"
50 #include "JITMulGenerator.h"
51 #include "JITRightShiftGenerator.h"
52 #include "JITSubGenerator.h"
53 #include "JSAsyncFunction.h"
54 #include "JSAsyncGeneratorFunction.h"
55 #include "JSCInlines.h"
56 #include "JSFixedArray.h"
57 #include "JSGeneratorFunction.h"
58 #include "JSLexicalEnvironment.h"
59 #include "JSPropertyNameEnumerator.h"
60 #include "LinkBuffer.h"
61 #include "RegExpConstructor.h"
62 #include "ScopedArguments.h"
63 #include "ScratchRegisterAllocator.h"
64 #include "SuperSampler.h"
65 #include "WeakMapImpl.h"
66 #include <wtf/BitVector.h>
67 #include <wtf/Box.h>
68 #include <wtf/MathExtras.h>
69
70 namespace JSC { namespace DFG {
71
72 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
73     : m_compileOkay(true)
74     , m_jit(jit)
75     , m_graph(m_jit.graph())
76     , m_currentNode(0)
77     , m_lastGeneratedNode(LastNodeType)
78     , m_indexInBlock(0)
79     , m_indexMaskingMode(Options::enableSpectreMitigations() ? IndexMaskingEnabled : IndexMaskingDisabled)
80     , m_generationInfo(m_jit.graph().frameRegisterCount())
81     , m_state(m_jit.graph())
82     , m_interpreter(m_jit.graph(), m_state)
83     , m_stream(&jit.jitCode()->variableEventStream)
84     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
85 {
86 }
87
88 SpeculativeJIT::~SpeculativeJIT()
89 {
90 }
91
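// Allocates a raw JSFinalObject with the given structure together with its butterfly
// (indexing header plus out-of-line storage), taking the inline fast path when the
// relevant allocators exist and otherwise deferring to operationNewRawObject through
// CallArrayAllocatorSlowPathGenerator. Unused vector slots are pre-filled with the
// empty value (or PNaN for double storage).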
92 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
93 {
94     IndexingType indexingType = structure->indexingType();
95     bool hasIndexingHeader = hasIndexedProperties(indexingType);
96
97     unsigned inlineCapacity = structure->inlineCapacity();
98     unsigned outOfLineCapacity = structure->outOfLineCapacity();
99     
100     GPRTemporary scratch(this);
101     GPRTemporary scratch2(this);
102     GPRReg scratchGPR = scratch.gpr();
103     GPRReg scratch2GPR = scratch2.gpr();
104
105     ASSERT(vectorLength >= numElements);
106     vectorLength = Butterfly::optimalContiguousVectorLength(structure.get(), vectorLength);
107     
108     JITCompiler::JumpList slowCases;
109
110     size_t size = 0;
111     if (hasIndexingHeader)
112         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
113     size += outOfLineCapacity * sizeof(JSValue);
114
115     m_jit.move(TrustedImmPtr(nullptr), storageGPR);
116
117     if (size) {
118         if (Allocator allocator = m_jit.vm()->jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists)) {
119             m_jit.emitAllocate(storageGPR, JITAllocator::constant(allocator), scratchGPR, scratch2GPR, slowCases);
120             
121             m_jit.addPtr(
122                 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
123                 storageGPR);
124             
125             if (hasIndexingHeader)
126                 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
127         } else
128             slowCases.append(m_jit.jump());
129     }
130
131     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
132     Allocator allocator = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
133     if (allocator) {
134         uint32_t mask = WTF::computeIndexingMask(vectorLength);
135         emitAllocateJSObject(resultGPR, JITAllocator::constant(allocator), scratchGPR, TrustedImmPtr(structure), storageGPR, TrustedImm32(mask), scratch2GPR, slowCases);
136         m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
137     } else
138         slowCases.append(m_jit.jump());
139
140     // I want a slow path that also loads out the storage pointer, and that's
141     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
142     // of work for a very small piece of functionality. :-/
143     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
144         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
145         structure, vectorLength));
146
147     if (numElements < vectorLength) {
148 #if USE(JSVALUE64)
149         if (hasDouble(structure->indexingType()))
150             m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
151         else
152             m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
153         for (unsigned i = numElements; i < vectorLength; ++i)
154             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
155 #else
156         EncodedValueDescriptor value;
157         if (hasDouble(structure->indexingType()))
158             value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
159         else
160             value.asInt64 = JSValue::encode(JSValue());
161         for (unsigned i = numElements; i < vectorLength; ++i) {
162             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
163             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
164         }
165 #endif
166     }
167     
168     if (hasIndexingHeader)
169         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
170     
171     m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());
172     
173     m_jit.mutatorFence(*m_jit.vm());
174 }
175
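// Materializes the argument count for the given inline call frame in lengthGPR. For a
// non-varargs inline frame the count is a compile-time constant; otherwise it is loaded
// from the frame's argument count slot. Subtracts one when |this| should be excluded.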
176 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
177 {
178     if (inlineCallFrame && !inlineCallFrame->isVarargs())
179         m_jit.move(TrustedImm32(inlineCallFrame->argumentCountIncludingThis - !includeThis), lengthGPR);
180     else {
181         VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
182         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
183         if (!includeThis)
184             m_jit.sub32(TrustedImm32(1), lengthGPR);
185     }
186 }
187
188 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
189 {
190     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
191 }
192
193 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
194 {
195     if (origin.inlineCallFrame) {
196         if (origin.inlineCallFrame->isClosureCall) {
197             m_jit.loadPtr(
198                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
199                 calleeGPR);
200         } else {
201             m_jit.move(
202                 TrustedImmPtr::weakPointer(m_jit.graph(), origin.inlineCallFrame->calleeRecovery.constant().asCell()),
203                 calleeGPR);
204         }
205     } else
206         m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
207 }
208
209 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
210 {
211     m_jit.addPtr(
212         TrustedImm32(
213             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
214         GPRInfo::callFrameRegister, startGPR);
215 }
216
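// When OSR exit fuzzing is enabled, bumps the global fuzz-check counter and returns a
// jump that is taken once the configured "fire at"/"fire at or after" threshold is hit;
// otherwise returns an unset jump.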
217 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
218 {
219     if (!Options::useOSRExitFuzz()
220         || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
221         || !doOSRExitFuzzing())
222         return MacroAssembler::Jump();
223     
224     MacroAssembler::Jump result;
225     
226     m_jit.pushToSave(GPRInfo::regT0);
227     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
228     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
229     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
230     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
231     unsigned at = Options::fireOSRExitFuzzAt();
232     if (at || atOrAfter) {
233         unsigned threshold;
234         MacroAssembler::RelationalCondition condition;
235         if (atOrAfter) {
236             threshold = atOrAfter;
237             condition = MacroAssembler::Below;
238         } else {
239             threshold = at;
240             condition = MacroAssembler::NotEqual;
241         }
242         MacroAssembler::Jump ok = m_jit.branch32(
243             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
244         m_jit.popToRestore(GPRInfo::regT0);
245         result = m_jit.jump();
246         ok.link(&m_jit);
247     }
248     m_jit.popToRestore(GPRInfo::regT0);
249     
250     return result;
251 }
252
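// The speculationCheck() family registers an OSR exit that fires when the given jump(s)
// are taken, recording the exit kind, the source of the value being checked, a way of
// getting a value profile, and the current variable event stream position.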
253 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
254 {
255     if (!m_compileOkay)
256         return;
257     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
258     if (fuzzJump.isSet()) {
259         JITCompiler::JumpList jumpsToFail;
260         jumpsToFail.append(fuzzJump);
261         jumpsToFail.append(jumpToFail);
262         m_jit.appendExitInfo(jumpsToFail);
263     } else
264         m_jit.appendExitInfo(jumpToFail);
265     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
266 }
267
268 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
269 {
270     if (!m_compileOkay)
271         return;
272     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
273     if (fuzzJump.isSet()) {
274         JITCompiler::JumpList myJumpsToFail;
275         myJumpsToFail.append(jumpsToFail);
276         myJumpsToFail.append(fuzzJump);
277         m_jit.appendExitInfo(myJumpsToFail);
278     } else
279         m_jit.appendExitInfo(jumpsToFail);
280     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
281 }
282
283 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
284 {
285     if (!m_compileOkay)
286         return OSRExitJumpPlaceholder();
287     unsigned index = m_jit.jitCode()->osrExit.size();
288     m_jit.appendExitInfo();
289     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
290     return OSRExitJumpPlaceholder(index);
291 }
292
293 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
294 {
295     return speculationCheck(kind, jsValueSource, nodeUse.node());
296 }
297
298 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
299 {
300     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
301 }
302
303 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
304 {
305     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
306 }
307
308 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
309 {
310     if (!m_compileOkay)
311         return;
312     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
313     m_jit.appendExitInfo(jumpToFail);
314     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
315 }
316
317 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
318 {
319     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
320 }
321
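// Emits an invalidation point: a watchpoint label recorded as the exit's replacement
// source, so the code can later be jump-replaced to take an UncountableInvalidation exit.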
322 void SpeculativeJIT::emitInvalidationPoint(Node* node)
323 {
324     if (!m_compileOkay)
325         return;
326     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
327     m_jit.jitCode()->appendOSRExit(OSRExit(
328         UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
329         this, m_stream->size()));
330     info.m_replacementSource = m_jit.watchpointLabel();
331     ASSERT(info.m_replacementSource.isSet());
332     noResult(node);
333 }
334
335 void SpeculativeJIT::unreachable(Node* node)
336 {
337     m_compileOkay = false;
338     m_jit.abortWithReason(DFGUnreachableNode, node->op());
339 }
340
341 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
342 {
343     if (!m_compileOkay)
344         return;
345     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
346     m_compileOkay = false;
347     if (verboseCompilationEnabled())
348         dataLog("Bailing compilation.\n");
349 }
350
351 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
352 {
353     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
354 }
355
356 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
357 {
358     ASSERT(needsTypeCheck(edge, typesPassedThrough));
359     m_interpreter.filter(edge, typesPassedThrough);
360     speculationCheck(exitKind, source, edge.node(), jumpToFail);
361 }
362
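// Returns the set of GPRs and FPRs currently holding live values, merged with the
// registers that are unavailable to IC stubs.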
363 RegisterSet SpeculativeJIT::usedRegisters()
364 {
365     RegisterSet result;
366     
367     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
368         GPRReg gpr = GPRInfo::toRegister(i);
369         if (m_gprs.isInUse(gpr))
370             result.set(gpr);
371     }
372     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
373         FPRReg fpr = FPRInfo::toRegister(i);
374         if (m_fprs.isInUse(fpr))
375             result.set(fpr);
376     }
377     
378     result.merge(RegisterSet::stubUnavailableRegisters());
379     
380     return result;
381 }
382
383 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
384 {
385     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
386 }
387
388 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
389 {
390     m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
391 }
392
393 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
394 {
395     for (auto& slowPathGenerator : m_slowPathGenerators) {
396         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
397         slowPathGenerator->generate(this);
398     }
399     for (auto& slowPathLambda : m_slowPathLambdas) {
400         Node* currentNode = slowPathLambda.currentNode;
401         m_currentNode = currentNode;
402         m_outOfLineStreamIndex = slowPathLambda.streamIndex;
403         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
404         slowPathLambda.generator();
405         m_outOfLineStreamIndex = std::nullopt;
406     }
407 }
408
409 void SpeculativeJIT::clearGenerationInfo()
410 {
411     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
412         m_generationInfo[i] = GenerationInfo();
413     m_gprs = RegisterBank<GPRInfo>();
414     m_fprs = RegisterBank<FPRInfo>();
415 }
416
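// Builds a plan describing how to spill the value currently held in |source| and how to
// refill it afterwards, so the register can be clobbered (for example across a call)
// without disturbing its generation info.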
417 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
418 {
419     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
420     Node* node = info.node();
421     DataFormat registerFormat = info.registerFormat();
422     ASSERT(registerFormat != DataFormatNone);
423     ASSERT(registerFormat != DataFormatDouble);
424         
425     SilentSpillAction spillAction;
426     SilentFillAction fillAction;
427         
428     if (!info.needsSpill())
429         spillAction = DoNothingForSpill;
430     else {
431 #if USE(JSVALUE64)
432         ASSERT(info.gpr() == source);
433         if (registerFormat == DataFormatInt32)
434             spillAction = Store32Payload;
435         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
436             spillAction = StorePtr;
437         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
438             spillAction = Store64;
439         else {
440             ASSERT(registerFormat & DataFormatJS);
441             spillAction = Store64;
442         }
443 #elif USE(JSVALUE32_64)
444         if (registerFormat & DataFormatJS) {
445             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
446             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
447         } else {
448             ASSERT(info.gpr() == source);
449             spillAction = Store32Payload;
450         }
451 #endif
452     }
453         
454     if (registerFormat == DataFormatInt32) {
455         ASSERT(info.gpr() == source);
456         ASSERT(isJSInt32(info.registerFormat()));
457         if (node->hasConstant()) {
458             ASSERT(node->isInt32Constant());
459             fillAction = SetInt32Constant;
460         } else
461             fillAction = Load32Payload;
462     } else if (registerFormat == DataFormatBoolean) {
463 #if USE(JSVALUE64)
464         RELEASE_ASSERT_NOT_REACHED();
465 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
466         fillAction = DoNothingForFill;
467 #endif
468 #elif USE(JSVALUE32_64)
469         ASSERT(info.gpr() == source);
470         if (node->hasConstant()) {
471             ASSERT(node->isBooleanConstant());
472             fillAction = SetBooleanConstant;
473         } else
474             fillAction = Load32Payload;
475 #endif
476     } else if (registerFormat == DataFormatCell) {
477         ASSERT(info.gpr() == source);
478         if (node->hasConstant()) {
479             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
480             node->asCell(); // To get the assertion.
481             fillAction = SetCellConstant;
482         } else {
483 #if USE(JSVALUE64)
484             fillAction = LoadPtr;
485 #else
486             fillAction = Load32Payload;
487 #endif
488         }
489     } else if (registerFormat == DataFormatStorage) {
490         ASSERT(info.gpr() == source);
491         fillAction = LoadPtr;
492     } else if (registerFormat == DataFormatInt52) {
493         if (node->hasConstant())
494             fillAction = SetInt52Constant;
495         else if (info.spillFormat() == DataFormatInt52)
496             fillAction = Load64;
497         else if (info.spillFormat() == DataFormatStrictInt52)
498             fillAction = Load64ShiftInt52Left;
499         else if (info.spillFormat() == DataFormatNone)
500             fillAction = Load64;
501         else {
502             RELEASE_ASSERT_NOT_REACHED();
503 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
504             fillAction = Load64; // Make GCC happy.
505 #endif
506         }
507     } else if (registerFormat == DataFormatStrictInt52) {
508         if (node->hasConstant())
509             fillAction = SetStrictInt52Constant;
510         else if (info.spillFormat() == DataFormatInt52)
511             fillAction = Load64ShiftInt52Right;
512         else if (info.spillFormat() == DataFormatStrictInt52)
513             fillAction = Load64;
514         else if (info.spillFormat() == DataFormatNone)
515             fillAction = Load64;
516         else {
517             RELEASE_ASSERT_NOT_REACHED();
518 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
519             fillAction = Load64; // Make GCC happy.
520 #endif
521         }
522     } else {
523         ASSERT(registerFormat & DataFormatJS);
524 #if USE(JSVALUE64)
525         ASSERT(info.gpr() == source);
526         if (node->hasConstant()) {
527             if (node->isCellConstant())
528                 fillAction = SetTrustedJSConstant;
529             else
530                 fillAction = SetJSConstant;
531         } else if (info.spillFormat() == DataFormatInt32) {
532             ASSERT(registerFormat == DataFormatJSInt32);
533             fillAction = Load32PayloadBoxInt;
534         } else
535             fillAction = Load64;
536 #else
537         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
538         if (node->hasConstant())
539             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
540         else if (info.payloadGPR() == source)
541             fillAction = Load32Payload;
542         else { // Fill the Tag
543             switch (info.spillFormat()) {
544             case DataFormatInt32:
545                 ASSERT(registerFormat == DataFormatJSInt32);
546                 fillAction = SetInt32Tag;
547                 break;
548             case DataFormatCell:
549                 ASSERT(registerFormat == DataFormatJSCell);
550                 fillAction = SetCellTag;
551                 break;
552             case DataFormatBoolean:
553                 ASSERT(registerFormat == DataFormatJSBoolean);
554                 fillAction = SetBooleanTag;
555                 break;
556             default:
557                 fillAction = Load32Tag;
558                 break;
559             }
560         }
561 #endif
562     }
563         
564     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
565 }
566     
567 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
568 {
569     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
570     Node* node = info.node();
571     ASSERT(info.registerFormat() == DataFormatDouble);
572
573     SilentSpillAction spillAction;
574     SilentFillAction fillAction;
575         
576     if (!info.needsSpill())
577         spillAction = DoNothingForSpill;
578     else {
579         ASSERT(!node->hasConstant());
580         ASSERT(info.spillFormat() == DataFormatNone);
581         ASSERT(info.fpr() == source);
582         spillAction = StoreDouble;
583     }
584         
585 #if USE(JSVALUE64)
586     if (node->hasConstant()) {
587         node->asNumber(); // To get the assertion.
588         fillAction = SetDoubleConstant;
589     } else {
590         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
591         fillAction = LoadDouble;
592     }
593 #elif USE(JSVALUE32_64)
594     ASSERT(info.registerFormat() == DataFormatDouble);
595     if (node->hasConstant()) {
596         node->asNumber(); // To get the assertion.
597         fillAction = SetDoubleConstant;
598     } else
599         fillAction = LoadDouble;
600 #endif
601
602     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
603 }
604     
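// Executes the store chosen by silentSavePlanForGPR/FPR, writing the live value back to
// its virtual register slot if a spill is needed.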
605 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
606 {
607     switch (plan.spillAction()) {
608     case DoNothingForSpill:
609         break;
610     case Store32Tag:
611         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
612         break;
613     case Store32Payload:
614         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
615         break;
616     case StorePtr:
617         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
618         break;
619 #if USE(JSVALUE64)
620     case Store64:
621         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
622         break;
623 #endif
624     case StoreDouble:
625         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
626         break;
627     default:
628         RELEASE_ASSERT_NOT_REACHED();
629     }
630 }
631     
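// Reverses silentSpill(): rematerializes constants or reloads the spilled value into the
// register named by the plan.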
632 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan)
633 {
634     switch (plan.fillAction()) {
635     case DoNothingForFill:
636         break;
637     case SetInt32Constant:
638         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
639         break;
640 #if USE(JSVALUE64)
641     case SetInt52Constant:
642         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
643         break;
644     case SetStrictInt52Constant:
645         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
646         break;
647 #endif // USE(JSVALUE64)
648     case SetBooleanConstant:
649         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
650         break;
651     case SetCellConstant:
652         ASSERT(plan.node()->constant()->value().isCell());
653         m_jit.move(TrustedImmPtr(plan.node()->constant()), plan.gpr());
654         break;
655 #if USE(JSVALUE64)
656     case SetTrustedJSConstant:
657         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
658         break;
659     case SetJSConstant:
660         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
661         break;
662     case SetDoubleConstant:
663         m_jit.moveDouble(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), plan.fpr());
664         break;
665     case Load32PayloadBoxInt:
666         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
667         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
668         break;
669     case Load32PayloadConvertToInt52:
670         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
671         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
672         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
673         break;
674     case Load32PayloadSignExtend:
675         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
676         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
677         break;
678 #else
679     case SetJSConstantTag:
680         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
681         break;
682     case SetJSConstantPayload:
683         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
684         break;
685     case SetInt32Tag:
686         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
687         break;
688     case SetCellTag:
689         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
690         break;
691     case SetBooleanTag:
692         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
693         break;
694     case SetDoubleConstant:
695         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
696         break;
697 #endif
698     case Load32Tag:
699         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
700         break;
701     case Load32Payload:
702         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
703         break;
704     case LoadPtr:
705         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
706         break;
707 #if USE(JSVALUE64)
708     case Load64:
709         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
710         break;
711     case Load64ShiftInt52Right:
712         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
713         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
714         break;
715     case Load64ShiftInt52Left:
716         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
717         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
718         break;
719 #endif
720     case LoadDouble:
721         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
722         break;
723     default:
724         RELEASE_ASSERT_NOT_REACHED();
725     }
726 }
727
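// Expects the cell's indexing type byte in tempGPR and returns jumps that are taken when
// the observed indexing shape (and array-ness, where the ArrayMode cares) does not match
// the mode being checked for.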
728 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
729 {
730     JITCompiler::JumpList result;
731     
732     switch (arrayMode.type()) {
733     case Array::Int32:
734     case Array::Double:
735     case Array::Contiguous:
736     case Array::Undecided:
737     case Array::ArrayStorage: {
738         IndexingType shape = arrayMode.shapeMask();
739         switch (arrayMode.arrayClass()) {
740         case Array::OriginalArray:
741             RELEASE_ASSERT_NOT_REACHED();
742             return result;
743
744         case Array::Array:
745             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
746             result.append(m_jit.branch32(
747                 MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape)));
748             return result;
749
750         case Array::NonArray:
751         case Array::OriginalNonArray:
752             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
753             result.append(m_jit.branch32(
754                 MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape)));
755             return result;
756
757         case Array::PossiblyArray:
758             m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
759             result.append(m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape)));
760             return result;
761         }
762
763         RELEASE_ASSERT_NOT_REACHED();
764         return result;
765     }
766
767     case Array::SlowPutArrayStorage: {
768         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
769
770         switch (arrayMode.arrayClass()) {
771         case Array::OriginalArray:
772             RELEASE_ASSERT_NOT_REACHED();
773             return result;
774
775         case Array::Array:
776             result.append(
777                 m_jit.branchTest32(
778                     MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
779             break;
780
781         case Array::NonArray:
782         case Array::OriginalNonArray:
783             result.append(
784                 m_jit.branchTest32(
785                     MacroAssembler::NonZero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
786             break;
787
788         case Array::PossiblyArray:
789             break;
790         }
791
792         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
793         m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
794         result.append(
795             m_jit.branch32(
796                 MacroAssembler::Above, tempGPR,
797                 TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
798         return result;
799     }
800     default:
801         CRASH();
802         break;
803     }
804     
805     return result;
806 }
807
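// Emits the check implied by the node's ArrayMode without performing any conversion: an
// indexing-shape check for the JSArray-backed modes, or a cell type check for
// DirectArguments, ScopedArguments and typed-array modes.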
808 void SpeculativeJIT::checkArray(Node* node)
809 {
810     ASSERT(node->arrayMode().isSpecific());
811     ASSERT(!node->arrayMode().doesConversion());
812     
813     SpeculateCellOperand base(this, node->child1());
814     GPRReg baseReg = base.gpr();
815     
816     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
817         noResult(m_currentNode);
818         return;
819     }
820     
821     switch (node->arrayMode().type()) {
822     case Array::AnyTypedArray:
823     case Array::String:
824         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
825         return;
826     case Array::Int32:
827     case Array::Double:
828     case Array::Contiguous:
829     case Array::Undecided:
830     case Array::ArrayStorage:
831     case Array::SlowPutArrayStorage: {
832         GPRTemporary temp(this);
833         GPRReg tempGPR = temp.gpr();
834         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
835         speculationCheck(
836             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
837             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
838         
839         noResult(m_currentNode);
840         return;
841     }
842     case Array::DirectArguments:
843         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
844         noResult(m_currentNode);
845         return;
846     case Array::ScopedArguments:
847         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
848         noResult(m_currentNode);
849         return;
850     default:
851         speculateCellTypeWithoutTypeFiltering(
852             node->child1(), baseReg,
853             typeForTypedArrayType(node->arrayMode().typedArrayType()));
854         noResult(m_currentNode);
855         return;
856     }
857 }
858
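// Converts the base object's storage to match the node's ArrayMode, checking the current
// structure (for ArrayifyToStructure) or indexing type and calling into
// ArrayifySlowPathGenerator when a conversion is actually required.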
859 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
860 {
861     ASSERT(node->arrayMode().doesConversion());
862     
863     GPRTemporary temp(this);
864     GPRTemporary structure;
865     GPRReg tempGPR = temp.gpr();
866     GPRReg structureGPR = InvalidGPRReg;
867     
868     if (node->op() != ArrayifyToStructure) {
869         GPRTemporary realStructure(this);
870         structure.adopt(realStructure);
871         structureGPR = structure.gpr();
872     }
873         
874     // We can skip all that comes next if we already have array storage.
875     MacroAssembler::JumpList slowPath;
876     
877     if (node->op() == ArrayifyToStructure) {
878         slowPath.append(m_jit.branchWeakStructure(
879             JITCompiler::NotEqual,
880             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
881             node->structure()));
882     } else {
883         m_jit.load8(
884             MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
885         
886         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
887     }
888     
889     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
890         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
891     
892     noResult(m_currentNode);
893 }
894
895 void SpeculativeJIT::arrayify(Node* node)
896 {
897     ASSERT(node->arrayMode().isSpecific());
898     
899     SpeculateCellOperand base(this, node->child1());
900     
901     if (!node->child2()) {
902         arrayify(node, base.gpr(), InvalidGPRReg);
903         return;
904     }
905     
906     SpeculateInt32Operand property(this, node->child2());
907     
908     arrayify(node, base.gpr(), property.gpr());
909 }
910
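// Fills the edge as a storage pointer when it is already in (or spilled as)
// DataFormatStorage; otherwise falls back to filling it as a cell.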
911 GPRReg SpeculativeJIT::fillStorage(Edge edge)
912 {
913     VirtualRegister virtualRegister = edge->virtualRegister();
914     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
915     
916     switch (info.registerFormat()) {
917     case DataFormatNone: {
918         if (info.spillFormat() == DataFormatStorage) {
919             GPRReg gpr = allocate();
920             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
921             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
922             info.fillStorage(*m_stream, gpr);
923             return gpr;
924         }
925         
926         // Must be a cell; fill it as a cell and then return the pointer.
927         return fillSpeculateCell(edge);
928     }
929         
930     case DataFormatStorage: {
931         GPRReg gpr = info.gpr();
932         m_gprs.lock(gpr);
933         return gpr;
934     }
935         
936     default:
937         return fillSpeculateCell(edge);
938     }
939 }
940
941 void SpeculativeJIT::useChildren(Node* node)
942 {
943     if (node->flags() & NodeHasVarArgs) {
944         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
945             if (!!m_jit.graph().m_varArgChildren[childIdx])
946                 use(m_jit.graph().m_varArgChildren[childIdx]);
947         }
948     } else {
949         Edge child1 = node->child1();
950         if (!child1) {
951             ASSERT(!node->child2() && !node->child3());
952             return;
953         }
954         use(child1);
955         
956         Edge child2 = node->child2();
957         if (!child2) {
958             ASSERT(!node->child3());
959             return;
960         }
961         use(child2);
962         
963         Edge child3 = node->child3();
964         if (!child3)
965             return;
966         use(child3);
967     }
968 }
969
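// Compiles TryGetById for cell and untyped bases via the cachedGetById inline cache,
// using AccessType::TryGet; an untyped base first gets a not-cell check that routes to
// the slow path.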
970 void SpeculativeJIT::compileTryGetById(Node* node)
971 {
972     switch (node->child1().useKind()) {
973     case CellUse: {
974         SpeculateCellOperand base(this, node->child1());
975         JSValueRegsTemporary result(this, Reuse, base);
976
977         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
978         JSValueRegs resultRegs = result.regs();
979
980         base.use();
981
982         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);
983
984         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
985         break;
986     }
987
988     case UntypedUse: {
989         JSValueOperand base(this, node->child1());
990         JSValueRegsTemporary result(this, Reuse, base);
991
992         JSValueRegs baseRegs = base.jsValueRegs();
993         JSValueRegs resultRegs = result.regs();
994
995         base.use();
996
997         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
998
999         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);
1000
1001         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1002         break;
1003     }
1004
1005     default:
1006         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1007         break;
1008     } 
1009 }
1010
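// Compiles the 'in' operator. When the property is a constant atomic string, an inline
// cache (a StructureStubInfo of AccessType::In with operationInOptimize as its slow
// path) is emitted; otherwise the generic operationGenericIn helper is called.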
1011 void SpeculativeJIT::compileIn(Node* node)
1012 {
1013     SpeculateCellOperand base(this, node->child1());
1014     GPRReg baseGPR = base.gpr();
1015     
1016     if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(*m_jit.vm())) {
1017         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1018             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1019             
1020             GPRTemporary result(this);
1021             GPRReg resultGPR = result.gpr();
1022
1023             use(node->child2());
1024             
1025             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1026             MacroAssembler::Label done = m_jit.label();
1027             
1028             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1029             // we can cast it to const AtomicStringImpl* safely.
1030             auto slowPath = slowPathCall(
1031                 jump.m_jump, this, operationInOptimize,
1032                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1033                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1034             
1035             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1036             stubInfo->codeOrigin = node->origin.semantic;
1037             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1038             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1039             stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
1040 #if USE(JSVALUE32_64)
1041             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1042             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1043             stubInfo->patch.thisTagGPR = static_cast<int8_t>(InvalidGPRReg);
1044 #endif
1045             stubInfo->patch.usedRegisters = usedRegisters();
1046
1047             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1048             addSlowPathGenerator(WTFMove(slowPath));
1049
1050             base.use();
1051
1052             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1053             return;
1054         }
1055     }
1056
1057     JSValueOperand key(this, node->child2());
1058     JSValueRegs regs = key.jsValueRegs();
1059         
1060     GPRFlushedCallResult result(this);
1061     GPRReg resultGPR = result.gpr();
1062         
1063     base.use();
1064     key.use();
1065         
1066     flushRegisters();
1067     callOperation(
1068         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1069         baseGPR, regs);
1070     m_jit.exceptionCheck();
1071     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1072 }
1073
1074 void SpeculativeJIT::compileDeleteById(Node* node)
1075 {
1076     JSValueOperand value(this, node->child1());
1077     GPRFlushedCallResult result(this);
1078
1079     JSValueRegs valueRegs = value.jsValueRegs();
1080     GPRReg resultGPR = result.gpr();
1081
1082     value.use();
1083
1084     flushRegisters();
1085     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1086     m_jit.exceptionCheck();
1087
1088     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1089 }
1090
1091 void SpeculativeJIT::compileDeleteByVal(Node* node)
1092 {
1093     JSValueOperand base(this, node->child1());
1094     JSValueOperand key(this, node->child2());
1095     GPRFlushedCallResult result(this);
1096
1097     JSValueRegs baseRegs = base.jsValueRegs();
1098     JSValueRegs keyRegs = key.jsValueRegs();
1099     GPRReg resultGPR = result.gpr();
1100
1101     base.use();
1102     key.use();
1103
1104     flushRegisters();
1105     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1106     m_jit.exceptionCheck();
1107
1108     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1109 }
1110
1111 void SpeculativeJIT::compilePushWithScope(Node* node)
1112 {
1113     SpeculateCellOperand currentScope(this, node->child1());
1114     GPRReg currentScopeGPR = currentScope.gpr();
1115
1116     GPRFlushedCallResult result(this);
1117     GPRReg resultGPR = result.gpr();
1118
1119     auto objectEdge = node->child2();
1120     if (objectEdge.useKind() == ObjectUse) {
1121         SpeculateCellOperand object(this, objectEdge);
1122         GPRReg objectGPR = object.gpr();
1123         speculateObject(objectEdge, objectGPR);
1124
1125         flushRegisters();
1126         callOperation(operationPushWithScopeObject, resultGPR, currentScopeGPR, objectGPR);
1127         // No exception check here as we did not have to call toObject().
1128     } else {
1129         ASSERT(objectEdge.useKind() == UntypedUse);
1130         JSValueOperand object(this, objectEdge);
1131         JSValueRegs objectRegs = object.jsValueRegs();
1132
1133         flushRegisters();
1134         callOperation(operationPushWithScope, resultGPR, currentScopeGPR, objectRegs);
1135         m_jit.exceptionCheck();
1136     }
1137     
1138     cellResult(resultGPR, node);
1139 }
1140
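// If the compare feeds the block's terminal branch (detected as a peephole opportunity),
// fuse compare and branch and return true so the branch node is skipped; otherwise emit
// a boolean-producing compare and return false.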
1141 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1142 {
1143     unsigned branchIndexInBlock = detectPeepHoleBranch();
1144     if (branchIndexInBlock != UINT_MAX) {
1145         Node* branchNode = m_block->at(branchIndexInBlock);
1146
1147         ASSERT(node->adjustedRefCount() == 1);
1148         
1149         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1150     
1151         m_indexInBlock = branchIndexInBlock;
1152         m_currentNode = branchNode;
1153         
1154         return true;
1155     }
1156     
1157     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1158     
1159     return false;
1160 }
1161
1162 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1163 {
1164     unsigned branchIndexInBlock = detectPeepHoleBranch();
1165     if (branchIndexInBlock != UINT_MAX) {
1166         Node* branchNode = m_block->at(branchIndexInBlock);
1167
1168         ASSERT(node->adjustedRefCount() == 1);
1169         
1170         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1171     
1172         m_indexInBlock = branchIndexInBlock;
1173         m_currentNode = branchNode;
1174         
1175         return true;
1176     }
1177     
1178     nonSpeculativeNonPeepholeStrictEq(node, invert);
1179     
1180     return false;
1181 }
1182
1183 static const char* dataFormatString(DataFormat format)
1184 {
1185     // These values correspond to the DataFormat enum.
1186     const char* strings[] = {
1187         "[  ]",
1188         "[ i]",
1189         "[ d]",
1190         "[ c]",
1191         "Err!",
1192         "Err!",
1193         "Err!",
1194         "Err!",
1195         "[J ]",
1196         "[Ji]",
1197         "[Jd]",
1198         "[Jc]",
1199         "Err!",
1200         "Err!",
1201         "Err!",
1202         "Err!",
1203     };
1204     return strings[format];
1205 }
1206
1207 void SpeculativeJIT::dump(const char* label)
1208 {
1209     if (label)
1210         dataLogF("<%s>\n", label);
1211
1212     dataLogF("  gprs:\n");
1213     m_gprs.dump();
1214     dataLogF("  fprs:\n");
1215     m_fprs.dump();
1216     dataLogF("  VirtualRegisters:\n");
1217     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1218         GenerationInfo& info = m_generationInfo[i];
1219         if (info.alive())
1220             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1221         else
1222             dataLogF("    % 3d:[__][__]", i);
1223         if (info.registerFormat() == DataFormatDouble)
1224             dataLogF(":fpr%d\n", info.fpr());
1225         else if (info.registerFormat() != DataFormatNone
1226 #if USE(JSVALUE32_64)
1227             && !(info.registerFormat() & DataFormatJS)
1228 #endif
1229             ) {
1230             ASSERT(info.gpr() != InvalidGPRReg);
1231             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1232         } else
1233             dataLogF("\n");
1234     }
1235     if (label)
1236         dataLogF("</%s>\n", label);
1237 }
1238
1239 GPRTemporary::GPRTemporary()
1240     : m_jit(0)
1241     , m_gpr(InvalidGPRReg)
1242 {
1243 }
1244
1245 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1246     : m_jit(jit)
1247     , m_gpr(InvalidGPRReg)
1248 {
1249     m_gpr = m_jit->allocate();
1250 }
1251
1252 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1253     : m_jit(jit)
1254     , m_gpr(InvalidGPRReg)
1255 {
1256     m_gpr = m_jit->allocate(specific);
1257 }
1258
1259 #if USE(JSVALUE32_64)
1260 GPRTemporary::GPRTemporary(
1261     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1262     : m_jit(jit)
1263     , m_gpr(InvalidGPRReg)
1264 {
1265     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1266         m_gpr = m_jit->reuse(op1.gpr(which));
1267     else
1268         m_gpr = m_jit->allocate();
1269 }
1270 #endif // USE(JSVALUE32_64)
1271
1272 JSValueRegsTemporary::JSValueRegsTemporary() { }
1273
1274 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1275 #if USE(JSVALUE64)
1276     : m_gpr(jit)
1277 #else
1278     : m_payloadGPR(jit)
1279     , m_tagGPR(jit)
1280 #endif
1281 {
1282 }
1283
1284 #if USE(JSVALUE64)
1285 template<typename T>
1286 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1287     : m_gpr(jit, Reuse, operand)
1288 {
1289 }
1290 #else
1291 template<typename T>
1292 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1293 {
1294     if (resultWord == PayloadWord) {
1295         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1296         m_tagGPR = GPRTemporary(jit);
1297     } else {
1298         m_payloadGPR = GPRTemporary(jit);
1299         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1300     }
1301 }
1302 #endif
1303
1304 #if USE(JSVALUE64)
1305 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1306 {
1307     m_gpr = GPRTemporary(jit, Reuse, operand);
1308 }
1309 #else
1310 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1311 {
1312     if (jit->canReuse(operand.node())) {
1313         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1314         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1315     } else {
1316         m_payloadGPR = GPRTemporary(jit);
1317         m_tagGPR = GPRTemporary(jit);
1318     }
1319 }
1320 #endif
1321
1322 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1323
1324 JSValueRegs JSValueRegsTemporary::regs()
1325 {
1326 #if USE(JSVALUE64)
1327     return JSValueRegs(m_gpr.gpr());
1328 #else
1329     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1330 #endif
1331 }
1332
1333 void GPRTemporary::adopt(GPRTemporary& other)
1334 {
1335     ASSERT(!m_jit);
1336     ASSERT(m_gpr == InvalidGPRReg);
1337     ASSERT(other.m_jit);
1338     ASSERT(other.m_gpr != InvalidGPRReg);
1339     m_jit = other.m_jit;
1340     m_gpr = other.m_gpr;
1341     other.m_jit = 0;
1342     other.m_gpr = InvalidGPRReg;
1343 }
1344
1345 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1346 {
1347     ASSERT(other.m_jit);
1348     ASSERT(other.m_fpr != InvalidFPRReg);
1349     m_jit = other.m_jit;
1350     m_fpr = other.m_fpr;
1351
1352     other.m_jit = nullptr;
1353 }
1354
1355 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1356     : m_jit(jit)
1357     , m_fpr(InvalidFPRReg)
1358 {
1359     m_fpr = m_jit->fprAllocate();
1360 }
1361
1362 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1363     : m_jit(jit)
1364     , m_fpr(InvalidFPRReg)
1365 {
1366     if (m_jit->canReuse(op1.node()))
1367         m_fpr = m_jit->reuse(op1.fpr());
1368     else
1369         m_fpr = m_jit->fprAllocate();
1370 }
1371
1372 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1373     : m_jit(jit)
1374     , m_fpr(InvalidFPRReg)
1375 {
1376     if (m_jit->canReuse(op1.node()))
1377         m_fpr = m_jit->reuse(op1.fpr());
1378     else if (m_jit->canReuse(op2.node()))
1379         m_fpr = m_jit->reuse(op2.fpr());
1380     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1381         m_fpr = m_jit->reuse(op1.fpr());
1382     else
1383         m_fpr = m_jit->fprAllocate();
1384 }
1385
1386 #if USE(JSVALUE32_64)
1387 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1388     : m_jit(jit)
1389     , m_fpr(InvalidFPRReg)
1390 {
1391     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1392         m_fpr = m_jit->reuse(op1.fpr());
1393     else
1394         m_fpr = m_jit->fprAllocate();
1395 }
1396 #endif
1397
1398 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1399 {
1400     BasicBlock* taken = branchNode->branchData()->taken.block;
1401     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1402
1403     if (taken == nextBlock()) {
1404         condition = MacroAssembler::invert(condition);
1405         std::swap(taken, notTaken);
1406     }
1407
1408     SpeculateDoubleOperand op1(this, node->child1());
1409     SpeculateDoubleOperand op2(this, node->child2());
1410     
1411     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1412     jump(notTaken);
1413 }
1414
1415 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1416 {
1417     BasicBlock* taken = branchNode->branchData()->taken.block;
1418     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1419
1420     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1421     
1422     if (taken == nextBlock()) {
1423         condition = MacroAssembler::NotEqual;
1424         BasicBlock* tmp = taken;
1425         taken = notTaken;
1426         notTaken = tmp;
1427     }
1428
1429     SpeculateCellOperand op1(this, node->child1());
1430     SpeculateCellOperand op2(this, node->child2());
1431     
1432     GPRReg op1GPR = op1.gpr();
1433     GPRReg op2GPR = op2.gpr();
1434     
1435     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1436         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1437             speculationCheck(
1438                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1439         }
1440         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1441             speculationCheck(
1442                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1443         }
1444     } else {
1445         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1446             speculationCheck(
1447                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1448                 m_jit.branchIfNotObject(op1GPR));
1449         }
1450         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1451             m_jit.branchTest8(
1452                 MacroAssembler::NonZero, 
1453                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1454                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1455
1456         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1457             speculationCheck(
1458                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1459                 m_jit.branchIfNotObject(op2GPR));
1460         }
1461         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1462             m_jit.branchTest8(
1463                 MacroAssembler::NonZero, 
1464                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1465                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1466     }
1467
1468     branchPtr(condition, op1GPR, op2GPR, taken);
1469     jump(notTaken);
1470 }
1471
1472 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1473 {
1474     BasicBlock* taken = branchNode->branchData()->taken.block;
1475     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1476
1477     // The branch instruction will branch to the taken block.
1478     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1479     if (taken == nextBlock()) {
1480         condition = JITCompiler::invert(condition);
1481         BasicBlock* tmp = taken;
1482         taken = notTaken;
1483         notTaken = tmp;
1484     }
1485
1486     if (node->child1()->isInt32Constant()) {
1487         int32_t imm = node->child1()->asInt32();
1488         SpeculateBooleanOperand op2(this, node->child2());
1489         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1490     } else if (node->child2()->isInt32Constant()) {
1491         SpeculateBooleanOperand op1(this, node->child1());
1492         int32_t imm = node->child2()->asInt32();
1493         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1494     } else {
1495         SpeculateBooleanOperand op1(this, node->child1());
1496         SpeculateBooleanOperand op2(this, node->child2());
1497         branch32(condition, op1.gpr(), op2.gpr(), taken);
1498     }
1499
1500     jump(notTaken);
1501 }
1502
1503 void SpeculativeJIT::compileStringSlice(Node* node)
1504 {
1505     SpeculateCellOperand string(this, node->child1());
1506     GPRTemporary startIndex(this);
1507     GPRTemporary temp(this);
1508     GPRTemporary temp2(this);
1509
1510     GPRReg stringGPR = string.gpr();
1511     GPRReg startIndexGPR = startIndex.gpr();
1512     GPRReg tempGPR = temp.gpr();
1513     GPRReg temp2GPR = temp2.gpr();
1514
1515     speculateString(node->child1(), stringGPR);
1516
1517     {
1518         m_jit.load32(JITCompiler::Address(stringGPR, JSString::offsetOfLength()), temp2GPR);
1519
1520         emitPopulateSliceIndex(node->child2(), temp2GPR, startIndexGPR);
1521         if (node->child3())
1522             emitPopulateSliceIndex(node->child3(), temp2GPR, tempGPR);
1523         else
1524             m_jit.move(temp2GPR, tempGPR);
1525     }
1526
1527     CCallHelpers::JumpList doneCases;
1528     CCallHelpers::JumpList slowCases;
1529
1530     auto nonEmptyCase = m_jit.branch32(MacroAssembler::Below, startIndexGPR, tempGPR);
1531     m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), jsEmptyString(&vm())), tempGPR);
1532     doneCases.append(m_jit.jump());
1533
1534     nonEmptyCase.link(&m_jit);
1535     m_jit.sub32(startIndexGPR, tempGPR); // tempGPR now holds the length of the sliced string.
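         // Only a slice of length exactly 1 is handled inline, via the single-character
         // string table below; any longer result falls back to operationStringSubstr.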
1536     slowCases.append(m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(1)));
1537
1538     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), temp2GPR);
1539     slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, temp2GPR));
1540
1541     m_jit.loadPtr(MacroAssembler::Address(temp2GPR, StringImpl::dataOffset()), tempGPR);
1542
1543     // Load the character into tempGPR
1544     m_jit.zeroExtend32ToPtr(startIndexGPR, startIndexGPR);
1545     auto is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(temp2GPR, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1546
1547     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesOne, 0), tempGPR);
1548     auto cont8Bit = m_jit.jump();
1549
1550     is16Bit.link(&m_jit);
1551     m_jit.load16(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesTwo, 0), tempGPR);
1552
1553     auto bigCharacter = m_jit.branch32(MacroAssembler::AboveOrEqual, tempGPR, TrustedImm32(0x100));
1554
1555     // 8-bit string values don't need the isASCII check.
1556     cont8Bit.link(&m_jit);
1557
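         // Index into the VM's single-character string table: each entry is a pointer, so
         // scale the character code by the pointer size (shift by 2 on 32-bit targets, 3 on 64-bit).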
1558     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), tempGPR);
1559     m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), tempGPR);
1560     m_jit.loadPtr(tempGPR, tempGPR);
1561
1562     addSlowPathGenerator(
1563         slowPathCall(
1564             bigCharacter, this, operationSingleCharacterString, tempGPR, tempGPR));
1565
1566     addSlowPathGenerator(
1567         slowPathCall(
1568             slowCases, this, operationStringSubstr, tempGPR, stringGPR, startIndexGPR, tempGPR));
1569
1570     doneCases.link(&m_jit);
1571     cellResult(tempGPR, node);
1572 }
1573
1574 void SpeculativeJIT::compileToLowerCase(Node* node)
1575 {
1576     ASSERT(node->op() == ToLowerCase);
1577     SpeculateCellOperand string(this, node->child1());
1578     GPRTemporary temp(this);
1579     GPRTemporary index(this);
1580     GPRTemporary charReg(this);
1581     GPRTemporary length(this);
1582
1583     GPRReg stringGPR = string.gpr();
1584     GPRReg tempGPR = temp.gpr();
1585     GPRReg indexGPR = index.gpr();
1586     GPRReg charGPR = charReg.gpr();
1587     GPRReg lengthGPR = length.gpr();
1588
1589     speculateString(node->child1(), stringGPR);
1590
1591     CCallHelpers::JumpList slowPath;
1592
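         // Fast path: scan an 8-bit string and return it unchanged if every character is
         // ASCII and none is an uppercase letter. Otherwise fall back to operationToLowerCase,
         // passing the index at which the scan stopped.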
1593     m_jit.move(TrustedImmPtr(nullptr), indexGPR);
1594
1595     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1596     slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
1597
1598     slowPath.append(m_jit.branchTest32(
1599         MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1600         MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1601     m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1602     m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1603
1604     auto loopStart = m_jit.label();
1605     auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1606     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1607     slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1608     m_jit.sub32(TrustedImm32('A'), charGPR);
1609     slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1610
1611     m_jit.add32(TrustedImm32(1), indexGPR);
1612     m_jit.jump().linkTo(loopStart, &m_jit);
1613     
1614     slowPath.link(&m_jit);
1615     silentSpillAllRegisters(lengthGPR);
1616     callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1617     silentFillAllRegisters();
1618     m_jit.exceptionCheck();
1619     auto done = m_jit.jump();
1620
1621     loopDone.link(&m_jit);
1622     m_jit.move(stringGPR, lengthGPR);
1623
1624     done.link(&m_jit);
1625     cellResult(lengthGPR, node);
1626 }
1627
1628 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1629 {
1630     BasicBlock* taken = branchNode->branchData()->taken.block;
1631     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1632
1633     // The branch instruction will branch to the taken block.
1634     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1635     if (taken == nextBlock()) {
1636         condition = JITCompiler::invert(condition);
1637         BasicBlock* tmp = taken;
1638         taken = notTaken;
1639         notTaken = tmp;
1640     }
1641
1642     if (node->child1()->isInt32Constant()) {
1643         int32_t imm = node->child1()->asInt32();
1644         SpeculateInt32Operand op2(this, node->child2());
1645         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1646     } else if (node->child2()->isInt32Constant()) {
1647         SpeculateInt32Operand op1(this, node->child1());
1648         int32_t imm = node->child2()->asInt32();
1649         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1650     } else {
1651         SpeculateInt32Operand op1(this, node->child1());
1652         SpeculateInt32Operand op2(this, node->child2());
1653         branch32(condition, op1.gpr(), op2.gpr(), taken);
1654     }
1655
1656     jump(notTaken);
1657 }
1658
1659 // Returns true if the compare is fused with a subsequent branch.
1660 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1661 {
1662     // Fused compare & branch.
1663     unsigned branchIndexInBlock = detectPeepHoleBranch();
1664     if (branchIndexInBlock != UINT_MAX) {
1665         Node* branchNode = m_block->at(branchIndexInBlock);
1666
1667         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1668         // so there can be no intervening nodes that also reference the compare.
1669         ASSERT(node->adjustedRefCount() == 1);
1670
1671         if (node->isBinaryUseKind(Int32Use))
1672             compilePeepHoleInt32Branch(node, branchNode, condition);
1673 #if USE(JSVALUE64)
1674         else if (node->isBinaryUseKind(Int52RepUse))
1675             compilePeepHoleInt52Branch(node, branchNode, condition);
1676 #endif // USE(JSVALUE64)
1677         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1678             // Use non-peephole comparison, for now.
1679             return false;
1680         } else if (node->isBinaryUseKind(DoubleRepUse))
1681             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1682         else if (node->op() == CompareEq) {
1683             if (node->isBinaryUseKind(BooleanUse))
1684                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1685             else if (node->isBinaryUseKind(SymbolUse))
1686                 compilePeepHoleSymbolEquality(node, branchNode);
1687             else if (node->isBinaryUseKind(ObjectUse))
1688                 compilePeepHoleObjectEquality(node, branchNode);
1689             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1690                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1691             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1692                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1693             else if (!needsTypeCheck(node->child1(), SpecOther))
1694                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1695             else if (!needsTypeCheck(node->child2(), SpecOther))
1696                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1697             else {
1698                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1699                 return true;
1700             }
1701         } else {
1702             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1703             return true;
1704         }
1705
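             // The compare's children are consumed here, and the block cursor is advanced to
             // the branch node so the main loop does not compile the fused branch again.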
1706         use(node->child1());
1707         use(node->child2());
1708         m_indexInBlock = branchIndexInBlock;
1709         m_currentNode = branchNode;
1710         return true;
1711     }
1712     return false;
1713 }
1714
1715 void SpeculativeJIT::noticeOSRBirth(Node* node)
1716 {
1717     if (!node->hasVirtualRegister())
1718         return;
1719     
1720     VirtualRegister virtualRegister = node->virtualRegister();
1721     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1722     
1723     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1724 }
1725
1726 void SpeculativeJIT::compileMovHint(Node* node)
1727 {
1728     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1729     
1730     Node* child = node->child1().node();
1731     noticeOSRBirth(child);
1732     
1733     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1734 }
1735
1736 void SpeculativeJIT::bail(AbortReason reason)
1737 {
1738     if (verboseCompilationEnabled())
1739         dataLog("Bailing compilation.\n");
1740     m_compileOkay = true;
1741     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1742     clearGenerationInfo();
1743 }
1744
1745 void SpeculativeJIT::compileCurrentBlock()
1746 {
1747     ASSERT(m_compileOkay);
1748     
1749     if (!m_block)
1750         return;
1751     
1752     ASSERT(m_block->isReachable);
1753     
1754     m_jit.blockHeads()[m_block->index] = m_jit.label();
1755
1756     if (!m_block->intersectionOfCFAHasVisited) {
1757         // Don't generate code for basic blocks that are unreachable according to CFA.
1758         // But to be sure that nobody has generated a jump to this block, drop in a
1759         // breakpoint here.
1760         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1761         return;
1762     }
1763
1764     if (m_block->isCatchEntrypoint) {
1765         m_jit.addPtr(CCallHelpers::TrustedImm32(-(m_jit.graph().frameRegisterCount() * sizeof(Register))), GPRInfo::callFrameRegister,  CCallHelpers::stackPointerRegister);
1766         if (Options::zeroStackFrame())
1767             m_jit.clearStackFrame(GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister, GPRInfo::regT0, m_jit.graph().frameRegisterCount() * sizeof(Register));
1768         m_jit.emitSaveCalleeSaves();
1769         m_jit.emitMaterializeTagCheckRegisters();
1770         m_jit.emitPutToCallFrameHeader(m_jit.codeBlock(), CallFrameSlot::codeBlock);
1771     }
1772
1773     m_stream->appendAndLog(VariableEvent::reset());
1774     
1775     m_jit.jitAssertHasValidCallFrame();
1776     m_jit.jitAssertTagsInPlace();
1777     m_jit.jitAssertArgumentCountSane();
1778
1779     m_state.reset();
1780     m_state.beginBasicBlock(m_block);
1781     
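         // Record the flush format of each local that is live at the head of the block, so
         // the variable event stream used for OSR exit reconstruction starts from a known state.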
1782     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1783         int operand = m_block->variablesAtHead.operandForIndex(i);
1784         Node* node = m_block->variablesAtHead[i];
1785         if (!node)
1786             continue; // No need to record dead SetLocals.
1787         
1788         VariableAccessData* variable = node->variableAccessData();
1789         DataFormat format;
1790         if (!node->refCount())
1791             continue; // No need to record dead SetLocals.
1792         format = dataFormatFor(variable->flushFormat());
1793         m_stream->appendAndLog(
1794             VariableEvent::setLocal(
1795                 VirtualRegister(operand),
1796                 variable->machineLocal(),
1797                 format));
1798     }
1799
1800     m_origin = NodeOrigin();
1801     
1802     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1803         m_currentNode = m_block->at(m_indexInBlock);
1804         
1805         // We may have hit a contradiction that the CFA was aware of but that the JIT
1806         // didn't cause directly.
1807         if (!m_state.isValid()) {
1808             bail(DFGBailedAtTopOfBlock);
1809             return;
1810         }
1811
1812         m_interpreter.startExecuting();
1813         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1814         m_jit.setForNode(m_currentNode);
1815         m_origin = m_currentNode->origin;
1816         if (validationEnabled())
1817             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1818         m_lastGeneratedNode = m_currentNode->op();
1819         
1820         ASSERT(m_currentNode->shouldGenerate());
1821         
1822         if (verboseCompilationEnabled()) {
1823             dataLogF(
1824                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1825                 (int)m_currentNode->index(),
1826                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1827             dataLog("\n");
1828         }
1829
1830         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1831             m_jit.jitReleaseAssertNoException(*m_jit.vm());
1832
1833         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1834
1835         compile(m_currentNode);
1836         
1837         if (belongsInMinifiedGraph(m_currentNode->op()))
1838             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1839         
1840 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1841         m_jit.clearRegisterAllocationOffsets();
1842 #endif
1843         
1844         if (!m_compileOkay) {
1845             bail(DFGBailedAtEndOfNode);
1846             return;
1847         }
1848         
1849         // Make sure that the abstract state is rematerialized for the next node.
1850         m_interpreter.executeEffects(m_indexInBlock);
1851     }
1852     
1853     // Perform the most basic verification that children have been used correctly.
1854     if (!ASSERT_DISABLED) {
1855         for (auto& info : m_generationInfo)
1856             RELEASE_ASSERT(!info.alive());
1857     }
1858 }
1859
1860 // If we are making type predictions about our arguments then
1861 // we need to check that they are correct on function entry.
1862 void SpeculativeJIT::checkArgumentTypes()
1863 {
1864     ASSERT(!m_currentNode);
1865     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1866
1867     auto& arguments = m_jit.graph().m_rootToArguments.find(m_jit.graph().block(0))->value;
1868     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1869         Node* node = arguments[i];
1870         if (!node) {
1871             // The argument is dead. We don't do any checks for such arguments.
1872             continue;
1873         }
1874         
1875         ASSERT(node->op() == SetArgument);
1876         ASSERT(node->shouldGenerate());
1877
1878         VariableAccessData* variableAccessData = node->variableAccessData();
1879         FlushFormat format = variableAccessData->flushFormat();
1880         
1881         if (format == FlushedJSValue)
1882             continue;
1883         
1884         VirtualRegister virtualRegister = variableAccessData->local();
1885
1886         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1887         
1888 #if USE(JSVALUE64)
1889         switch (format) {
1890         case FlushedInt32: {
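                 // A boxed int32 is the only JSValue whose bits are at or above the
                 // TagTypeNumber pattern, so anything below the tag register is not an int32.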
1891             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1892             break;
1893         }
1894         case FlushedBoolean: {
1895             GPRTemporary temp(this);
1896             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1897             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
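                 // true is encoded as (ValueFalse | 1), so after the xor a genuine boolean
                 // leaves at most the low bit set; any other bit means a non-boolean.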
1898             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1899             break;
1900         }
1901         case FlushedCell: {
1902             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1903             break;
1904         }
1905         default:
1906             RELEASE_ASSERT_NOT_REACHED();
1907             break;
1908         }
1909 #else
1910         switch (format) {
1911         case FlushedInt32: {
1912             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1913             break;
1914         }
1915         case FlushedBoolean: {
1916             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1917             break;
1918         }
1919         case FlushedCell: {
1920             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1921             break;
1922         }
1923         default:
1924             RELEASE_ASSERT_NOT_REACHED();
1925             break;
1926         }
1927 #endif
1928     }
1929
1930     m_origin = NodeOrigin();
1931 }
1932
1933 bool SpeculativeJIT::compile()
1934 {
1935     checkArgumentTypes();
1936     
1937     ASSERT(!m_currentNode);
1938     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1939         m_jit.setForBlockIndex(blockIndex);
1940         m_block = m_jit.graph().block(blockIndex);
1941         compileCurrentBlock();
1942     }
1943     linkBranches();
1944     return true;
1945 }
1946
1947 void SpeculativeJIT::createOSREntries()
1948 {
1949     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1950         BasicBlock* block = m_jit.graph().block(blockIndex);
1951         if (!block)
1952             continue;
1953         if (block->isOSRTarget || block->isCatchEntrypoint) {
1954             // Currently we don't have OSR entry trampolines. We could add them
1955             // here if need be.
1956             m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1957         }
1958     }
1959 }
1960
1961 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1962 {
1963     unsigned osrEntryIndex = 0;
1964     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1965         BasicBlock* block = m_jit.graph().block(blockIndex);
1966         if (!block)
1967             continue;
1968         if (!block->isOSRTarget && !block->isCatchEntrypoint)
1969             continue;
1970         if (block->isCatchEntrypoint) {
1971             auto& argumentsVector = m_jit.graph().m_rootToArguments.find(block)->value;
1972             Vector<FlushFormat> argumentFormats;
1973             argumentFormats.reserveInitialCapacity(argumentsVector.size());
1974             for (Node* setArgument : argumentsVector) {
1975                 if (setArgument) {
1976                     FlushFormat flushFormat = setArgument->variableAccessData()->flushFormat();
1977                     ASSERT(flushFormat == FlushedInt32 || flushFormat == FlushedCell || flushFormat == FlushedBoolean || flushFormat == FlushedJSValue);
1978                     argumentFormats.uncheckedAppend(flushFormat);
1979                 } else
1980                     argumentFormats.uncheckedAppend(DeadFlush);
1981             }
1982             m_jit.noticeCatchEntrypoint(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer, WTFMove(argumentFormats));
1983         } else {
1984             ASSERT(block->isOSRTarget);
1985             m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1986         }
1987     }
1988
1989     m_jit.jitCode()->finalizeOSREntrypoints();
1990     m_jit.jitCode()->common.finalizeCatchEntrypoints();
1991
1992     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1993     
1994     if (verboseCompilationEnabled()) {
1995         DumpContext dumpContext;
1996         dataLog("OSR Entries:\n");
1997         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1998             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1999         if (!dumpContext.isEmpty())
2000             dumpContext.dump(WTF::dataFile());
2001     }
2002 }
2003     
2004 void SpeculativeJIT::compileCheckTraps(Node*)
2005 {
2006     ASSERT(Options::usePollingTraps());
2007     GPRTemporary unused(this);
2008     GPRReg unusedGPR = unused.gpr();
2009
2010     JITCompiler::Jump needTrapHandling = m_jit.branchTest8(JITCompiler::NonZero,
2011         JITCompiler::AbsoluteAddress(m_jit.vm()->needTrapHandlingAddress()));
2012
2013     addSlowPathGenerator(slowPathCall(needTrapHandling, this, operationHandleTraps, unusedGPR));
2014 }
2015
2016 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
2017 {
2018     Edge child3 = m_jit.graph().varArgChild(node, 2);
2019     Edge child4 = m_jit.graph().varArgChild(node, 3);
2020
2021     ArrayMode arrayMode = node->arrayMode();
2022     
2023     GPRReg baseReg = base.gpr();
2024     GPRReg propertyReg = property.gpr();
2025     
2026     SpeculateDoubleOperand value(this, child3);
2027
2028     FPRReg valueReg = value.fpr();
2029     
2030     DFG_TYPE_CHECK(
2031         JSValueRegs(), child3, SpecFullRealNumber,
2032         m_jit.branchDouble(
2033             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
2034     
2035     if (!m_compileOkay)
2036         return;
2037     
2038     StorageOperand storage(this, child4);
2039     GPRReg storageReg = storage.gpr();
2040
2041     if (node->op() == PutByValAlias) {
2042         // Store the value to the array.
2043         GPRReg propertyReg = property.gpr();
2044         FPRReg valueReg = value.fpr();
2045         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2046         
2047         noResult(m_currentNode);
2048         return;
2049     }
2050     
2051     GPRTemporary temporary;
2052     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
2053
2054     MacroAssembler::Jump slowCase;
2055     
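         // In-bounds stores must stay below the public length. Other modes may grow the public
         // length up to the vector length; stores past the vector length either exit (when the
         // mode is not out-of-bounds) or take the slow path call emitted below.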
2056     if (arrayMode.isInBounds()) {
2057         speculationCheck(
2058             OutOfBounds, JSValueRegs(), 0,
2059             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2060     } else {
2061         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2062         
2063         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
2064         
2065         if (!arrayMode.isOutOfBounds())
2066             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
2067         
2068         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
2069         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2070
2071         inBounds.link(&m_jit);
2072     }
2073     
2074     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2075
2076     base.use();
2077     property.use();
2078     value.use();
2079     storage.use();
2080     
2081     if (arrayMode.isOutOfBounds()) {
2082         addSlowPathGenerator(
2083             slowPathCall(
2084                 slowCase, this,
2085                 m_jit.codeBlock()->isStrictMode()
2086                     ? (node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsStrict)
2087                     : (node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsNonStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict),
2088                 NoResult, baseReg, propertyReg, valueReg));
2089     }
2090
2091     noResult(m_currentNode, UseChildrenCalledExplicitly);
2092 }
2093
2094 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
2095 {
2096     SpeculateCellOperand string(this, node->child1());
2097     SpeculateStrictInt32Operand index(this, node->child2());
2098     StorageOperand storage(this, node->child3());
2099
2100     GPRReg stringReg = string.gpr();
2101     GPRReg indexReg = index.gpr();
2102     GPRReg storageReg = storage.gpr();
2103     
2104     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
2105
2106     // unsigned comparison so we can filter out negative indices and indices that are too large
2107     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2108
2109     GPRTemporary scratch(this);
2110     GPRReg scratchReg = scratch.gpr();
2111
2112     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2113     m_jit.and32(MacroAssembler::Address(scratchReg, StringImpl::maskOffset()), indexReg);
2114
2115     // Load the character into scratchReg
2116     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2117
2118     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2119     JITCompiler::Jump cont8Bit = m_jit.jump();
2120
2121     is16Bit.link(&m_jit);
2122
2123     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2124
2125     cont8Bit.link(&m_jit);
2126
2127     int32Result(scratchReg, m_currentNode);
2128 }
2129
2130 void SpeculativeJIT::compileGetByValOnString(Node* node)
2131 {
2132     SpeculateCellOperand base(this, m_graph.child(node, 0));
2133     SpeculateStrictInt32Operand property(this, m_graph.child(node, 1));
2134     StorageOperand storage(this, m_graph.child(node, 2));
2135     GPRReg baseReg = base.gpr();
2136     GPRReg propertyReg = property.gpr();
2137     GPRReg storageReg = storage.gpr();
2138
2139     GPRTemporary scratch(this);
2140     GPRReg scratchReg = scratch.gpr();
2141 #if USE(JSVALUE32_64)
2142     GPRTemporary resultTag;
2143     GPRReg resultTagReg = InvalidGPRReg;
2144     if (node->arrayMode().isOutOfBounds()) {
2145         GPRTemporary realResultTag(this);
2146         resultTag.adopt(realResultTag);
2147         resultTagReg = resultTag.gpr();
2148     }
2149 #endif
2150
2151     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(m_graph.child(node, 0))));
2152
2153     // unsigned comparison so we can filter out negative indices and indices that are too large
2154     JITCompiler::Jump outOfBounds = m_jit.branch32(
2155         MacroAssembler::AboveOrEqual, propertyReg,
2156         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2157     if (node->arrayMode().isInBounds())
2158         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2159
2160     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2161     m_jit.and32(MacroAssembler::Address(scratchReg, StringImpl::maskOffset()), propertyReg);
2162
2163     // Load the character into scratchReg
2164     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2165
2166     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2167     JITCompiler::Jump cont8Bit = m_jit.jump();
2168
2169     is16Bit.link(&m_jit);
2170
2171     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2172
2173     JITCompiler::Jump bigCharacter =
2174         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2175
2176     // 8-bit string values don't need the isASCII check.
2177     cont8Bit.link(&m_jit);
2178
2179     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2180     m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2181     m_jit.loadPtr(scratchReg, scratchReg);
2182
2183     addSlowPathGenerator(
2184         slowPathCall(
2185             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2186
2187     if (node->arrayMode().isOutOfBounds()) {
2188 #if USE(JSVALUE32_64)
2189         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2190 #endif
2191
2192         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2193         bool prototypeChainIsSane = false;
2194         if (globalObject->stringPrototypeChainIsSane()) {
2195             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2196             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2197             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2198             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2199             // indexed properties either.
2200             // https://bugs.webkit.org/show_bug.cgi?id=144668
2201             m_jit.graph().registerAndWatchStructureTransition(globalObject->stringPrototype()->structure());
2202             m_jit.graph().registerAndWatchStructureTransition(globalObject->objectPrototype()->structure());
2203             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2204         }
2205         if (prototypeChainIsSane) {
2206 #if USE(JSVALUE64)
2207             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2208                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2209 #else
2210             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2211                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2212                 baseReg, propertyReg));
2213 #endif
2214         } else {
2215 #if USE(JSVALUE64)
2216             addSlowPathGenerator(
2217                 slowPathCall(
2218                     outOfBounds, this, operationGetByValStringInt,
2219                     scratchReg, baseReg, propertyReg));
2220 #else
2221             addSlowPathGenerator(
2222                 slowPathCall(
2223                     outOfBounds, this, operationGetByValStringInt,
2224                     JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2225 #endif
2226         }
2227         
2228 #if USE(JSVALUE64)
2229         jsValueResult(scratchReg, m_currentNode);
2230 #else
2231         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2232 #endif
2233     } else
2234         cellResult(scratchReg, m_currentNode);
2235 }
2236
2237 void SpeculativeJIT::compileFromCharCode(Node* node)
2238 {
2239     Edge& child = node->child1();
2240     if (child.useKind() == UntypedUse) {
2241         JSValueOperand opr(this, child);
2242         JSValueRegs oprRegs = opr.jsValueRegs();
2243
2244         flushRegisters();
2245         JSValueRegsFlushedCallResult result(this);
2246         JSValueRegs resultRegs = result.regs();
2247         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2248         m_jit.exceptionCheck();
2249         
2250         jsValueResult(resultRegs, node);
2251         return;
2252     }
2253
2254     SpeculateStrictInt32Operand property(this, child);
2255     GPRReg propertyReg = property.gpr();
2256     GPRTemporary smallStrings(this);
2257     GPRTemporary scratch(this);
2258     GPRReg scratchReg = scratch.gpr();
2259     GPRReg smallStringsReg = smallStrings.gpr();
2260
2261     JITCompiler::JumpList slowCases;
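         // Char codes below 0xff are served from the VM's single-character string table when
         // the entry has already been materialized; larger codes or empty entries go slow.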
2262     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2263     m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2264     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2265
2266     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2267     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2268     cellResult(scratchReg, m_currentNode);
2269 }
2270
2271 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2272 {
2273     VirtualRegister virtualRegister = node->virtualRegister();
2274     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2275
2276     switch (info.registerFormat()) {
2277     case DataFormatStorage:
2278         RELEASE_ASSERT_NOT_REACHED();
2279
2280     case DataFormatBoolean:
2281     case DataFormatCell:
2282         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2283         return GeneratedOperandTypeUnknown;
2284
2285     case DataFormatNone:
2286     case DataFormatJSCell:
2287     case DataFormatJS:
2288     case DataFormatJSBoolean:
2289     case DataFormatJSDouble:
2290         return GeneratedOperandJSValue;
2291
2292     case DataFormatJSInt32:
2293     case DataFormatInt32:
2294         return GeneratedOperandInteger;
2295
2296     default:
2297         RELEASE_ASSERT_NOT_REACHED();
2298         return GeneratedOperandTypeUnknown;
2299     }
2300 }
2301
2302 void SpeculativeJIT::compileValueToInt32(Node* node)
2303 {
2304     switch (node->child1().useKind()) {
2305 #if USE(JSVALUE64)
2306     case Int52RepUse: {
2307         SpeculateStrictInt52Operand op1(this, node->child1());
2308         GPRTemporary result(this, Reuse, op1);
2309         GPRReg op1GPR = op1.gpr();
2310         GPRReg resultGPR = result.gpr();
2311         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2312         int32Result(resultGPR, node, DataFormatInt32);
2313         return;
2314     }
2315 #endif // USE(JSVALUE64)
2316         
2317     case DoubleRepUse: {
2318         GPRTemporary result(this);
2319         SpeculateDoubleOperand op1(this, node->child1());
2320         FPRReg fpr = op1.fpr();
2321         GPRReg gpr = result.gpr();
2322         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2323         
2324         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this,
2325             hasSensibleDoubleToInt() ? operationToInt32SensibleSlow : operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2326         
2327         int32Result(gpr, node);
2328         return;
2329     }
2330     
2331     case NumberUse:
2332     case NotCellUse: {
2333         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2334         case GeneratedOperandInteger: {
2335             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2336             GPRTemporary result(this, Reuse, op1);
2337             m_jit.move(op1.gpr(), result.gpr());
2338             int32Result(result.gpr(), node, op1.format());
2339             return;
2340         }
2341         case GeneratedOperandJSValue: {
2342             GPRTemporary result(this);
2343 #if USE(JSVALUE64)
2344             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2345
2346             GPRReg gpr = op1.gpr();
2347             GPRReg resultGpr = result.gpr();
2348             FPRTemporary tempFpr(this);
2349             FPRReg fpr = tempFpr.fpr();
2350
2351             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2352             JITCompiler::JumpList converted;
2353
2354             if (node->child1().useKind() == NumberUse) {
2355                 DFG_TYPE_CHECK(
2356                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2357                     m_jit.branchTest64(
2358                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2359             } else {
2360                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2361                 
2362                 DFG_TYPE_CHECK(
2363                     JSValueRegs(gpr), node->child1(), ~SpecCellCheck, m_jit.branchIfCell(JSValueRegs(gpr)));
2364                 
2365                 // It's not a cell: so true turns into 1 and all else turns into 0.
2366                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2367                 converted.append(m_jit.jump());
2368                 
2369                 isNumber.link(&m_jit);
2370             }
2371
2372             // First, if we get here we have a double encoded as a JSValue
2373             unboxDouble(gpr, resultGpr, fpr);
2374
2375             silentSpillAllRegisters(resultGpr);
2376             callOperation(operationToInt32, resultGpr, fpr);
2377             silentFillAllRegisters();
2378
2379             converted.append(m_jit.jump());
2380
2381             isInteger.link(&m_jit);
2382             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2383
2384             converted.link(&m_jit);
2385 #else
2386             Node* childNode = node->child1().node();
2387             VirtualRegister virtualRegister = childNode->virtualRegister();
2388             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2389
2390             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2391
2392             GPRReg payloadGPR = op1.payloadGPR();
2393             GPRReg resultGpr = result.gpr();
2394         
2395             JITCompiler::JumpList converted;
2396
2397             if (info.registerFormat() == DataFormatJSInt32)
2398                 m_jit.move(payloadGPR, resultGpr);
2399             else {
2400                 GPRReg tagGPR = op1.tagGPR();
2401                 FPRTemporary tempFpr(this);
2402                 FPRReg fpr = tempFpr.fpr();
2403                 FPRTemporary scratch(this);
2404
2405                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2406
2407                 if (node->child1().useKind() == NumberUse) {
2408                     DFG_TYPE_CHECK(
2409                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2410                         m_jit.branch32(
2411                             MacroAssembler::AboveOrEqual, tagGPR,
2412                             TrustedImm32(JSValue::LowestTag)));
2413                 } else {
2414                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2415                     
2416                     DFG_TYPE_CHECK(
2417                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2418                         m_jit.branchIfCell(op1.jsValueRegs()));
2419                     
2420                     // It's not a cell: so true turns into 1 and all else turns into 0.
2421                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2422                     m_jit.move(TrustedImm32(0), resultGpr);
2423                     converted.append(m_jit.jump());
2424                     
2425                     isBoolean.link(&m_jit);
2426                     m_jit.move(payloadGPR, resultGpr);
2427                     converted.append(m_jit.jump());
2428                     
2429                     isNumber.link(&m_jit);
2430                 }
2431
2432                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2433
2434                 silentSpillAllRegisters(resultGpr);
2435                 callOperation(operationToInt32, resultGpr, fpr);
2436                 silentFillAllRegisters();
2437
2438                 converted.append(m_jit.jump());
2439
2440                 isInteger.link(&m_jit);
2441                 m_jit.move(payloadGPR, resultGpr);
2442
2443                 converted.link(&m_jit);
2444             }
2445 #endif
2446             int32Result(resultGpr, node);
2447             return;
2448         }
2449         case GeneratedOperandTypeUnknown:
2450             RELEASE_ASSERT(!m_compileOkay);
2451             return;
2452         }
2453         RELEASE_ASSERT_NOT_REACHED();
2454         return;
2455     }
2456     
2457     default:
2458         ASSERT(!m_compileOkay);
2459         return;
2460     }
2461 }
2462
2463 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2464 {
2465     if (doesOverflow(node->arithMode())) {
2466         if (enableInt52()) {
2467             SpeculateInt32Operand op1(this, node->child1());
2468             GPRTemporary result(this, Reuse, op1);
2469             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2470             strictInt52Result(result.gpr(), node);
2471             return;
2472         }
2473         SpeculateInt32Operand op1(this, node->child1());
2474         FPRTemporary result(this);
2475             
2476         GPRReg inputGPR = op1.gpr();
2477         FPRReg outputFPR = result.fpr();
2478             
2479         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2480             
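             // The int32 is really an unsigned value: if its sign bit is set, correct the
             // converted double by adding 2^32.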
2481         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2482         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2483         positive.link(&m_jit);
2484             
2485         doubleResult(outputFPR, node);
2486         return;
2487     }
2488     
2489     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2490
2491     SpeculateInt32Operand op1(this, node->child1());
2492     GPRTemporary result(this);
2493
2494     m_jit.move(op1.gpr(), result.gpr());
2495
2496     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2497
2498     int32Result(result.gpr(), node, op1.format());
2499 }
2500
2501 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2502 {
2503     SpeculateDoubleOperand op1(this, node->child1());
2504     FPRTemporary scratch(this);
2505     GPRTemporary result(this);
2506     
2507     FPRReg valueFPR = op1.fpr();
2508     FPRReg scratchFPR = scratch.fpr();
2509     GPRReg resultGPR = result.gpr();
2510
2511     JITCompiler::JumpList failureCases;
2512     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2513     m_jit.branchConvertDoubleToInt32(
2514         valueFPR, resultGPR, failureCases, scratchFPR,
2515         shouldCheckNegativeZero(node->arithMode()));
2516     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2517
2518     int32Result(resultGPR, node);
2519 }
2520
2521 void SpeculativeJIT::compileDoubleRep(Node* node)
2522 {
2523     switch (node->child1().useKind()) {
2524     case RealNumberUse: {
2525         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2526         FPRTemporary result(this);
2527         
2528         JSValueRegs op1Regs = op1.jsValueRegs();
2529         FPRReg resultFPR = result.fpr();
2530         
2531 #if USE(JSVALUE64)
2532         GPRTemporary temp(this);
2533         GPRReg tempGPR = temp.gpr();
2534         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2535 #else
2536         FPRTemporary temp(this);
2537         FPRReg tempFPR = temp.fpr();
2538         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2539 #endif
2540         
2541         JITCompiler::Jump done = m_jit.branchDouble(
2542             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2543         
2544         DFG_TYPE_CHECK(
2545             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2546         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2547         
2548         done.link(&m_jit);
2549         
2550         doubleResult(resultFPR, node);
2551         return;
2552     }
2553     
2554     case NotCellUse:
2555     case NumberUse: {
2556         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2557
2558         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2559         if (isInt32Speculation(possibleTypes)) {
2560             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2561             FPRTemporary result(this);
2562             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2563             doubleResult(result.fpr(), node);
2564             return;
2565         }
2566
2567         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2568         FPRTemporary result(this);
2569
2570 #if USE(JSVALUE64)
2571         GPRTemporary temp(this);
2572
2573         GPRReg op1GPR = op1.gpr();
2574         GPRReg tempGPR = temp.gpr();
2575         FPRReg resultFPR = result.fpr();
2576         JITCompiler::JumpList done;
2577
2578         JITCompiler::Jump isInteger = m_jit.branch64(
2579             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2580
2581         if (node->child1().useKind() == NotCellUse) {
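                 // For NotCellUse, non-number primitives are converted inline: null and false
                 // produce 0, true produces 1, and undefined produces NaN.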
2582             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2583             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2584
2585             static const double zero = 0;
2586             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2587
2588             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2589             done.append(isNull);
2590
2591             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCellCheck,
2592                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2593
2594             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2595             static const double one = 1;
2596             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2597             done.append(m_jit.jump());
2598             done.append(isFalse);
2599
2600             isUndefined.link(&m_jit);
2601             static const double NaN = PNaN;
2602             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2603             done.append(m_jit.jump());
2604
2605             isNumber.link(&m_jit);
2606         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2607             typeCheck(
2608                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2609                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2610         }
2611
2612         unboxDouble(op1GPR, tempGPR, resultFPR);
2613         done.append(m_jit.jump());
2614     
2615         isInteger.link(&m_jit);
2616         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2617         done.link(&m_jit);
2618 #else // USE(JSVALUE64) -> this is the 32_64 case
2619         FPRTemporary temp(this);
2620     
2621         GPRReg op1TagGPR = op1.tagGPR();
2622         GPRReg op1PayloadGPR = op1.payloadGPR();
2623         FPRReg tempFPR = temp.fpr();
2624         FPRReg resultFPR = result.fpr();
2625         JITCompiler::JumpList done;
2626     
2627         JITCompiler::Jump isInteger = m_jit.branch32(
2628             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2629
2630         if (node->child1().useKind() == NotCellUse) {
2631             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2632             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2633
2634             static const double zero = 0;
2635             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2636
2637             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2638             done.append(isNull);
2639
2640             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2641
2642             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2643             static const double one = 1;
2644             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2645             done.append(m_jit.jump());
2646             done.append(isFalse);
2647
2648             isUndefined.link(&m_jit);
2649             static const double NaN = PNaN;
2650             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2651             done.append(m_jit.jump());
2652
2653             isNumber.link(&m_jit);
2654         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2655             typeCheck(
2656                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2657                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2658         }
2659
2660         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2661         done.append(m_jit.jump());
2662     
2663         isInteger.link(&m_jit);
2664         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2665         done.link(&m_jit);
2666 #endif // USE(JSVALUE64)
2667     
2668         doubleResult(resultFPR, node);
2669         return;
2670     }
2671         
2672 #if USE(JSVALUE64)
2673     case Int52RepUse: {
2674         SpeculateStrictInt52Operand value(this, node->child1());
2675         FPRTemporary result(this);
2676         
2677         GPRReg valueGPR = value.gpr();
2678         FPRReg resultFPR = result.fpr();
2679
2680         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2681         
2682         doubleResult(resultFPR, node);
2683         return;
2684     }
2685 #endif // USE(JSVALUE64)
2686         
2687     default:
2688         RELEASE_ASSERT_NOT_REACHED();
2689         return;
2690     }
2691 }
2692
2693 void SpeculativeJIT::compileValueRep(Node* node)
2694 {
2695     switch (node->child1().useKind()) {
2696     case DoubleRepUse: {
2697         SpeculateDoubleOperand value(this, node->child1());
2698         JSValueRegsTemporary result(this);
2699         
2700         FPRReg valueFPR = value.fpr();
2701         JSValueRegs resultRegs = result.regs();
2702         
2703         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2704         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2705         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2706         // local was purified.
2707         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2708             m_jit.purifyNaN(valueFPR);
2709
2710         boxDouble(valueFPR, resultRegs);
2711         
2712         jsValueResult(resultRegs, node);
2713         return;
2714     }
2715         
2716 #if USE(JSVALUE64)
2717     case Int52RepUse: {
2718         SpeculateStrictInt52Operand value(this, node->child1());
2719         GPRTemporary result(this);
2720         
2721         GPRReg valueGPR = value.gpr();
2722         GPRReg resultGPR = result.gpr();
2723         
2724         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2725         
2726         jsValueResult(resultGPR, node);
2727         return;
2728     }
2729 #endif // USE(JSVALUE64)
2730         
2731     default:
2732         RELEASE_ASSERT_NOT_REACHED();
2733         return;
2734     }
2735 }
2736
2737 static double clampDoubleToByte(double d)
2738 {
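         // Round half up and clamp to [0, 255]; the !(d > 0) form also maps NaN to 0.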
2739     d += 0.5;
2740     if (!(d > 0))
2741         d = 0;
2742     else if (d > 255)
2743         d = 255;
2744     return d;
2745 }
2746
2747 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2748 {
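         // The unsigned BelowOrEqual check treats negative inputs as large values, so only
         // 0..255 falls through directly; the signed GreaterThan then separates values above
         // 255 (clamped to 255) from negative ones (cleared to 0).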
2749     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2750     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2751     jit.xorPtr(result, result);
2752     MacroAssembler::Jump clamped = jit.jump();
2753     tooBig.link(&jit);
2754     jit.move(JITCompiler::TrustedImm32(255), result);
2755     clamped.link(&jit);
2756     inBounds.link(&jit);
2757 }
2758
2759 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2760 {
2761     // Unordered compare so we pick up NaN
2762     static const double zero = 0;
2763     static const double byteMax = 255;
2764     static const double half = 0.5;
2765     jit.loadDouble(JITCompiler::TrustedImmPtr(&zero), scratch);
2766     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2767     jit.loadDouble(JITCompiler::TrustedImmPtr(&byteMax), scratch);
2768     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2769     
2770     jit.loadDouble(JITCompiler::TrustedImmPtr(&half), scratch);
2771     // FIXME: This should probably just use a floating point round!
2772     // https://bugs.webkit.org/show_bug.cgi?id=72054
2773     jit.addDouble(source, scratch);
2774     jit.truncateDoubleToInt32(scratch, result);   
2775     MacroAssembler::Jump truncatedInt = jit.jump();
2776     
2777     tooSmall.link(&jit);
2778     jit.xorPtr(result, result);
2779     MacroAssembler::Jump zeroed = jit.jump();
2780     
2781     tooBig.link(&jit);
2782     jit.move(JITCompiler::TrustedImm32(255), result);
2783     
2784     truncatedInt.link(&jit);
2785     zeroed.link(&jit);
2786
2787 }
2788
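// Emits the bounds check for a typed array access and returns the out-of-bounds jump. Returns an
// unset jump when no check is needed: PutByValAlias is known in-bounds, and a constant index into
// a view whose length folds at compile time can be proven in-bounds statically. Otherwise the
// index is compared against either the folded length or the length loaded from the view.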
2789 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2790 {
2791     if (node->op() == PutByValAlias)
2792         return JITCompiler::Jump();
2793     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2794         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2795     if (view) {
2796         uint32_t length = view->length();
2797         Node* indexNode = m_jit.graph().child(node, 1).node();
2798         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2799             return JITCompiler::Jump();
2800         return m_jit.branch32(
2801             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2802     }
2803     return m_jit.branch32(
2804         MacroAssembler::AboveOrEqual, indexGPR,
2805         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2806 }
2807
2808 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2809 {
2810     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2811     if (!jump.isSet())
2812         return;
2813     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2814 }
2815
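// Invoked after the store has been emitted. On the in-bounds path this emits (and returns) a jump
// over the out-of-bounds handling. For in-bounds array modes an out-of-bounds access is an OSR
// exit; otherwise we only exit when a wasteful view's vector is null (i.e. the buffer appears to
// have been neutered), and any other out-of-bounds store simply falls through and is dropped.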
2816 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2817 {
2818     JITCompiler::Jump done;
2819     if (outOfBounds.isSet()) {
2820         done = m_jit.jump();
2821         if (node->arrayMode().isInBounds())
2822             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2823         else {
2824             outOfBounds.link(&m_jit);
2825
2826             JITCompiler::Jump notWasteful = m_jit.branch32(
2827                 MacroAssembler::NotEqual,
2828                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2829                 TrustedImm32(WastefulTypedArray));
2830
2831             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2832                 MacroAssembler::Zero,
2833                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfPoisonedVector()));
2834             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2835             notWasteful.link(&m_jit);
2836         }
2837     }
2838     return done;
2839 }
2840
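// Loads one element from an int typed array's storage. When index masking is enabled the index is
// first ANDed with the object's butterfly indexing mask; the load width and sign extension are
// chosen from the typed array's element type.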
2841 void SpeculativeJIT::loadFromIntTypedArray(GPRReg baseReg, GPRReg storageReg, GPRReg propertyReg, GPRReg resultReg, TypedArrayType type)
2842 {
2843     if (m_indexMaskingMode == IndexMaskingEnabled)
2844         m_jit.and32(MacroAssembler::Address(baseReg, JSObject::butterflyIndexingMaskOffset()), propertyReg);
2845     switch (elementSize(type)) {
2846     case 1:
2847         if (isSigned(type))
2848             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2849         else
2850             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2851         break;
2852     case 2:
2853         if (isSigned(type))
2854             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2855         else
2856             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2857         break;
2858     case 4:
2859         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2860         break;
2861     default:
2862         CRASH();
2863     }
2864 }
2865
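// Produces the node's result from a raw int typed array load. Elements narrower than 32 bits, and
// signed 32-bit elements, always fit in an int32. Unsigned 32-bit elements may not: with Int32
// speculation we OSR exit if the sign bit is set; on 64-bit we can zero-extend to a strict Int52;
// otherwise we convert to double and add 2^32 when the value came out negative.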
2866 void SpeculativeJIT::setIntTypedArrayLoadResult(Node* node, GPRReg resultReg, TypedArrayType type, bool canSpeculate)
2867 {
2868     if (elementSize(type) < 4 || isSigned(type)) {
2869         int32Result(resultReg, node);
2870         return;
2871     }
2872     
2873     ASSERT(elementSize(type) == 4 && !isSigned(type));
2874     if (node->shouldSpeculateInt32() && canSpeculate) {
2875         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2876         int32Result(resultReg, node);
2877         return;
2878     }
2879     
2880 #if USE(JSVALUE64)
2881     if (node->shouldSpeculateAnyInt()) {
2882         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2883         strictInt52Result(resultReg, node);
2884         return;
2885     }
2886 #endif
2887     
2888     FPRTemporary fresult(this);
2889     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2890     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2891     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2892     positive.link(&m_jit);
2893     doubleResult(fresult.fpr(), node);
2894 }
2895
2896 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2897 {
2898     ASSERT(isInt(type));
2899     
2900     SpeculateCellOperand base(this, m_graph.varArgChild(node, 0));
2901     SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
2902     StorageOperand storage(this, m_graph.varArgChild(node, 2));
2903
2904     GPRReg baseReg = base.gpr();
2905     GPRReg propertyReg = property.gpr();
2906     GPRReg storageReg = storage.gpr();
2907
2908     GPRTemporary result(this);
2909     GPRReg resultReg = result.gpr();
2910
2911     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(m_graph.varArgChild(node, 0))));
2912
2913     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2914     loadFromIntTypedArray(baseReg, storageReg, propertyReg, resultReg, type);
2915     bool canSpeculate = true;
2916     setIntTypedArrayLoadResult(node, resultReg, type, canSpeculate);
2917 }
2918
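// Materializes the value to store into a GPR (plus tag temporaries on 32-bit). Suitable number
// constants are clamped/truncated at compile time; otherwise the value is converted according to
// its use kind, clamping to a byte when requested and routing doubles that cannot be truncated
// inline to slowPathCases. Returns false when speculation has already been terminated because the
// constant cannot be a number.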
2919 bool SpeculativeJIT::getIntTypedArrayStoreOperand(
2920     GPRTemporary& value,
2921     GPRReg property,
2922 #if USE(JSVALUE32_64)
2923     GPRTemporary& propertyTag,
2924     GPRTemporary& valueTag,
2925 #endif
2926     Edge valueUse, JITCompiler::JumpList& slowPathCases, bool isClamped)
2927 {
2928     bool isAppropriateConstant = false;
2929     if (valueUse->isConstant()) {
2930         JSValue jsValue = valueUse->asJSValue();
2931         SpeculatedType expectedType = typeFilterFor(valueUse.useKind());
2932         SpeculatedType actualType = speculationFromValue(jsValue);
2933         isAppropriateConstant = (expectedType | actualType) == expectedType;
2934     }
2935     
2936     if (isAppropriateConstant) {
2937         JSValue jsValue = valueUse->asJSValue();
2938         if (!jsValue.isNumber()) {
2939             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2940             return false;
2941         }
2942         double d = jsValue.asNumber();
2943         if (isClamped)
2944             d = clampDoubleToByte(d);
2945         GPRTemporary scratch(this);
2946         GPRReg scratchReg = scratch.gpr();
2947         m_jit.move(Imm32(toInt32(d)), scratchReg);
2948         value.adopt(scratch);
2949     } else {
2950         switch (valueUse.useKind()) {
2951         case Int32Use: {
2952             SpeculateInt32Operand valueOp(this, valueUse);
2953             GPRTemporary scratch(this);
2954             GPRReg scratchReg = scratch.gpr();
2955             m_jit.move(valueOp.gpr(), scratchReg);
2956             if (isClamped)
2957                 compileClampIntegerToByte(m_jit, scratchReg);
2958             value.adopt(scratch);
2959             break;
2960         }
2961             
2962 #if USE(JSVALUE64)
2963         case Int52RepUse: {
2964             SpeculateStrictInt52Operand valueOp(this, valueUse);
2965             GPRTemporary scratch(this);
2966             GPRReg scratchReg = scratch.gpr();
2967             m_jit.move(valueOp.gpr(), scratchReg);
2968             if (isClamped) {
2969                 MacroAssembler::Jump inBounds = m_jit.branch64(
2970                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2971                 MacroAssembler::Jump tooBig = m_jit.branch64(
2972                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2973                 m_jit.move(TrustedImm32(0), scratchReg);
2974                 MacroAssembler::Jump clamped = m_jit.jump();
2975                 tooBig.link(&m_jit);
2976                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2977                 clamped.link(&m_jit);
2978                 inBounds.link(&m_jit);
2979             }
2980             value.adopt(scratch);
2981             break;
2982         }
2983 #endif // USE(JSVALUE64)
2984             
2985         case DoubleRepUse: {
2986             RELEASE_ASSERT(!isAtomicsIntrinsic(m_currentNode->op()));
2987             if (isClamped) {
2988                 SpeculateDoubleOperand valueOp(this, valueUse);
2989                 GPRTemporary result(this);
2990                 FPRTemporary floatScratch(this);
2991                 FPRReg fpr = valueOp.fpr();
2992                 GPRReg gpr = result.gpr();
2993                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2994                 value.adopt(result);
2995             } else {
2996 #if USE(JSVALUE32_64)
2997                 GPRTemporary realPropertyTag(this);
2998                 propertyTag.adopt(realPropertyTag);
2999                 GPRReg propertyTagGPR = propertyTag.gpr();
3000
3001                 GPRTemporary realValueTag(this);
3002                 valueTag.adopt(realValueTag);
3003                 GPRReg valueTagGPR = valueTag.gpr();
3004 #endif
3005                 SpeculateDoubleOperand valueOp(this, valueUse);
3006                 GPRTemporary result(this);
3007                 FPRReg fpr = valueOp.fpr();
3008                 GPRReg gpr = result.gpr();
3009                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
3010                 m_jit.xorPtr(gpr, gpr);
3011                 MacroAssembler::JumpList fixed(m_jit.jump());
3012                 notNaN.link(&m_jit);
3013
3014                 fixed.append(m_jit.branchTruncateDoubleToInt32(
3015                     fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
3016
3017 #if USE(JSVALUE64)
3018                 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
3019                 boxDouble(fpr, gpr);
3020 #else
3021                 UNUSED_PARAM(property);
3022                 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
3023                 boxDouble(fpr, valueTagGPR, gpr);
3024 #endif
3025                 slowPathCases.append(m_jit.jump());
3026
3027                 fixed.link(&m_jit);
3028                 value.adopt(result);
3029             }
3030             break;
3031         }
3032             
3033         default:
3034             RELEASE_ASSERT_NOT_REACHED();
3035             break;
3036         }
3037     }
3038     return true;
3039 }
3040
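// Stores into an int typed array: the value is prepared by getIntTypedArrayStoreOperand, the store
// uses the element width, and out-of-bounds indices are handled by
// jumpForTypedArrayIsNeuteredIfOutOfBounds. Doubles that could not be truncated inline fall back
// to the generic PutByVal operations.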
3041 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3042 {
3043     ASSERT(isInt(type));
3044     
3045     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3046     GPRReg storageReg = storage.gpr();
3047     
3048     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3049     
3050     GPRTemporary value;
3051 #if USE(JSVALUE32_64)
3052     GPRTemporary propertyTag;
3053     GPRTemporary valueTag;
3054 #endif
3055
3056     JITCompiler::JumpList slowPathCases;
3057     
3058     bool result = getIntTypedArrayStoreOperand(
3059         value, property,
3060 #if USE(JSVALUE32_64)
3061         propertyTag, valueTag,
3062 #endif
3063         valueUse, slowPathCases, isClamped(type));
3064     if (!result) {
3065         noResult(node);
3066         return;
3067     }
3068
3069     GPRReg valueGPR = value.gpr();
3070 #if USE(JSVALUE32_64)
3071     GPRReg propertyTagGPR = propertyTag.gpr();
3072     GPRReg valueTagGPR = valueTag.gpr();
3073 #endif
3074
3075     ASSERT_UNUSED(valueGPR, valueGPR != property);
3076     ASSERT(valueGPR != base);
3077     ASSERT(valueGPR != storageReg);
3078     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3079
3080     switch (elementSize(type)) {
3081     case 1:
3082         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
3083         break;
3084     case 2:
3085         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
3086         break;
3087     case 4:
3088         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3089         break;
3090     default:
3091         CRASH();
3092     }
3093
3094     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3095     if (done.isSet())
3096         done.link(&m_jit);
3097
3098     if (!slowPathCases.empty()) {
3099 #if USE(JSVALUE64)
3100         if (node->op() == PutByValDirect) {
3101             addSlowPathGenerator(slowPathCall(
3102                 slowPathCases, this,
3103                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
3104                 NoResult, base, property, valueGPR));
3105         } else {
3106             addSlowPathGenerator(slowPathCall(
3107                 slowPathCases, this,
3108                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
3109                 NoResult, base, property, valueGPR));
3110         }
3111 #else // not USE(JSVALUE64)
3112         if (node->op() == PutByValDirect) {
3113             addSlowPathGenerator(slowPathCall(
3114                 slowPathCases, this,
3115                 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
3116                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3117         } else {
3118             addSlowPathGenerator(slowPathCall(
3119                 slowPathCases, this,
3120                 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
3121                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3122         }
3123 #endif
3124     }
3125     
3126     noResult(node);
3127 }
3128
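// Float/double typed array load: masks the index when index masking is enabled, loads the element,
// and widens 4-byte floats to double before returning a DoubleRep result.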
3129 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3130 {
3131     ASSERT(isFloat(type));
3132     
3133     SpeculateCellOperand base(this, m_graph.varArgChild(node, 0));
3134     SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
3135     StorageOperand storage(this, m_graph.varArgChild(node, 2));
3136
3137     GPRReg baseReg = base.gpr();
3138     GPRReg propertyReg = property.gpr();
3139     GPRReg storageReg = storage.gpr();
3140
3141     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(m_graph.varArgChild(node, 0))));
3142
3143     FPRTemporary result(this);
3144     FPRReg resultReg = result.fpr();
3145     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3146     if (m_indexMaskingMode == IndexMaskingEnabled)
3147         m_jit.and32(MacroAssembler::Address(baseReg, JSObject::butterflyIndexingMaskOffset()), propertyReg);
3148     switch (elementSize(type)) {
3149     case 4:
3150         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3151         m_jit.convertFloatToDouble(resultReg, resultReg);
3152         break;
3153     case 8: {
3154         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3155         break;
3156     }
3157     default:
3158         RELEASE_ASSERT_NOT_REACHED();
3159     }
3160     
3161     doubleResult(resultReg, node);
3162 }
3163
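// Float/double typed array store: narrows the DoubleRep value to float for 4-byte elements;
// out-of-bounds handling mirrors the int typed array path.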
3164 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3165 {
3166     ASSERT(isFloat(type));
3167     
3168     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3169     GPRReg storageReg = storage.gpr();
3170     
3171     Edge baseUse = m_jit.graph().varArgChild(node, 0);
3172     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3173
3174     SpeculateDoubleOperand valueOp(this, valueUse);
3175     FPRTemporary scratch(this);
3176     FPRReg valueFPR = valueOp.fpr();
3177     FPRReg scratchFPR = scratch.fpr();
3178
3179     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3180     
3181     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3182     
3183     switch (elementSize(type)) {
3184     case 4: {
3185         m_jit.moveDouble(valueFPR, scratchFPR);
3186         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3187         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3188         break;
3189     }
3190     case 8:
3191         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3192         break;
3193     default:
3194         RELEASE_ASSERT_NOT_REACHED();
3195     }
3196
3197     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3198     if (done.isSet())
3199         done.link(&m_jit);
3200     noResult(node);
3201 }
3202
3203 void SpeculativeJIT::compileGetByValForObjectWithString(Node* node)
3204 {
3205     SpeculateCellOperand arg1(this, m_graph.varArgChild(node, 0));
3206     SpeculateCellOperand arg2(this, m_graph.varArgChild(node, 1));
3207
3208     GPRReg arg1GPR = arg1.gpr();
3209     GPRReg arg2GPR = arg2.gpr();
3210
3211     speculateObject(m_graph.varArgChild(node, 0), arg1GPR);
3212     speculateString(m_graph.varArgChild(node, 1), arg2GPR);
3213
3214     flushRegisters();
3215     JSValueRegsFlushedCallResult result(this);
3216     JSValueRegs resultRegs = result.regs();
3217     callOperation(operationGetByValObjectString, resultRegs, arg1GPR, arg2GPR);
3218     m_jit.exceptionCheck();
3219
3220     jsValueResult(resultRegs, node);
3221 }
3222
3223 void SpeculativeJIT::compileGetByValForObjectWithSymbol(Node* node)
3224 {
3225     SpeculateCellOperand arg1(this, m_graph.varArgChild(node, 0));
3226     SpeculateCellOperand arg2(this, m_graph.varArgChild(node, 1));
3227
3228     GPRReg arg1GPR = arg1.gpr();
3229     GPRReg arg2GPR = arg2.gpr();
3230
3231     speculateObject(m_graph.varArgChild(node, 0), arg1GPR);
3232     speculateSymbol(m_graph.varArgChild(node, 1), arg2GPR);
3233
3234     flushRegisters();
3235     JSValueRegsFlushedCallResult result(this);
3236     JSValueRegs resultRegs = result.regs();
3237     callOperation(operationGetByValObjectSymbol, resultRegs, arg1GPR, arg2GPR);
3238     m_jit.exceptionCheck();
3239
3240     jsValueResult(resultRegs, node);
3241 }
3242
3243 void SpeculativeJIT::compilePutByValForCellWithString(Node* node, Edge& child1, Edge& child2, Edge& child3)
3244 {
3245     SpeculateCellOperand arg1(this, child1);
3246     SpeculateCellOperand arg2(this, child2);
3247     JSValueOperand arg3(this, child3);
3248
3249     GPRReg arg1GPR = arg1.gpr();
3250     GPRReg arg2GPR = arg2.gpr();
3251     JSValueRegs arg3Regs = arg3.jsValueRegs();
3252
3253     speculateString(child2, arg2GPR);
3254
3255     flushRegisters();
3256     callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValCellStringStrict : operationPutByValCellStringNonStrict, arg1GPR, arg2GPR, arg3Regs);
3257     m_jit.exceptionCheck();
3258
3259     noResult(node);
3260 }
3261
3262 void SpeculativeJIT::compilePutByValForCellWithSymbol(Node* node, Edge& child1, Edge& child2, Edge& child3)
3263 {
3264     SpeculateCellOperand arg1(this, child1);
3265     SpeculateCellOperand arg2(this, child2);
3266     JSValueOperand arg3(this, child3);
3267
3268     GPRReg arg1GPR = arg1.gpr();
3269     GPRReg arg2GPR = arg2.gpr();
3270     JSValueRegs arg3Regs = arg3.jsValueRegs();
3271
3272     speculateSymbol(child2, arg2GPR);
3273
3274     flushRegisters();
3275     callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValCellSymbolStrict : operationPutByValCellSymbolNonStrict, arg1GPR, arg2GPR, arg3Regs);
3276     m_jit.exceptionCheck();
3277
3278     noResult(node);
3279 }
3280
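// Walks the prototype chain of the value in valueReg, leaving the boolean result in scratchReg
// (boxed on 64-bit, a 0/1 payload on 32-bit). Proxy objects bail out to
// operationDefaultHasInstance.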
3281 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg, GPRReg scratch3Reg)
3282 {
3283     // Check that prototype is an object.
3284     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3285     
3286     // Initialize scratchReg with the value being checked.
3287     m_jit.move(valueReg, scratchReg);
3288     
3289     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3290     MacroAssembler::Label loop(&m_jit);
3291     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3292         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3293     m_jit.emitLoadStructure(*m_jit.vm(), scratchReg, scratch3Reg, scratch2Reg);
3294 #if USE(JSVALUE64)
3295     m_jit.load64(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset()), scratch3Reg);
3296     auto hasMonoProto = m_jit.branchTest64(JITCompiler::NonZero, scratch3Reg);
3297     m_jit.load64(JITCompiler::Address(scratchReg, offsetRelativeToBase(knownPolyProtoOffset)), scratch3Reg);
3298     hasMonoProto.link(&m_jit);
3299     m_jit.move(scratch3Reg, scratchReg);
3300 #else
3301     m_jit.load32(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset() + TagOffset), scratch2Reg);
3302     m_jit.load32(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset() + PayloadOffset), scratch3Reg);
3303     auto hasMonoProto = m_jit.branch32(CCallHelpers::NotEqual, scratch2Reg, TrustedImm32(JSValue::EmptyValueTag));
3304     m_jit.load32(JITCompiler::Address(scratchReg, offsetRelativeToBase(knownPolyProtoOffset) + PayloadOffset), scratch3Reg);
3305     hasMonoProto.link(&m_jit);
3306     m_jit.move(scratch3Reg, scratchReg);
3307 #endif
3308
3309     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3310 #if USE(JSVALUE64)
3311     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3312 #else
3313     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3314 #endif
3315     
3316     // No match - result is false.
3317 #if USE(JSVALUE64)
3318     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
3319 #else
3320     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
3321 #endif
3322     MacroAssembler::JumpList doneJumps; 
3323     doneJumps.append(m_jit.jump());
3324
3325     performDefaultHasInstance.link(&m_jit);
3326     silentSpillAllRegisters(scratchReg);
3327     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
3328     silentFillAllRegisters();
3329     m_jit.exceptionCheck();
3330 #if USE(JSVALUE64)
3331     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
3332 #endif
3333     doneJumps.append(m_jit.jump());
3334     
3335     isInstance.link(&m_jit);
3336 #if USE(JSVALUE64)
3337     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
3338 #else
3339     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
3340 #endif
3341     
3342     doneJumps.link(&m_jit);
3343 }
3344
3345 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
3346 {
3347     SpeculateCellOperand base(this, node->child1());
3348
3349     GPRReg baseGPR = base.gpr();
3350
3351     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
3352
3353     noResult(node);
3354 }
3355
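// parseInt is always a call: we pick one of four operations depending on whether a radix argument
// is present and whether the input is speculated to be a string.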
3356 void SpeculativeJIT::compileParseInt(Node* node)
3357 {
3358     RELEASE_ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == StringUse);
3359     if (node->child2()) {
3360         SpeculateInt32Operand radix(this, node->child2());
3361         GPRReg radixGPR = radix.gpr();
3362         if (node->child1().useKind() == UntypedUse) {
3363             JSValueOperand value(this, node->child1());
3364             JSValueRegs valueRegs = value.jsValueRegs();
3365
3366             flushRegisters();
3367             JSValueRegsFlushedCallResult result(this);
3368             JSValueRegs resultRegs = result.regs();
3369             callOperation(operationParseIntGeneric, resultRegs, valueRegs, radixGPR);
3370             m_jit.exceptionCheck();
3371             jsValueResult(resultRegs, node);
3372             return;
3373         }
3374
3375         SpeculateCellOperand value(this, node->child1());
3376         GPRReg valueGPR = value.gpr();
3377         speculateString(node->child1(), valueGPR);
3378
3379         flushRegisters();
3380         JSValueRegsFlushedCallResult result(this);
3381         JSValueRegs resultRegs = result.regs();
3382         callOperation(operationParseIntString, resultRegs, valueGPR, radixGPR);
3383         m_jit.exceptionCheck();
3384         jsValueResult(resultRegs, node);
3385         return;
3386     }
3387
3388     if (node->child1().useKind() == UntypedUse) {
3389         JSValueOperand value(this, node->child1());
3390         JSValueRegs valueRegs = value.jsValueRegs();
3391
3392         flushRegisters();
3393         JSValueRegsFlushedCallResult result(this);
3394         JSValueRegs resultRegs = result.regs();
3395         callOperation(operationParseIntNoRadixGeneric, resultRegs, valueRegs);
3396         m_jit.exceptionCheck();
3397         jsValueResult(resultRegs, node);
3398         return;
3399     }
3400
3401     SpeculateCellOperand value(this, node->child1());
3402     GPRReg valueGPR = value.gpr();
3403     speculateString(node->child1(), valueGPR);
3404
3405     flushRegisters();
3406     JSValueRegsFlushedCallResult result(this);
3407     JSValueRegs resultRegs = result.regs();
3408     callOperation(operationParseIntStringNoRadix, resultRegs, valueGPR);
3409     m_jit.exceptionCheck();
3410     jsValueResult(resultRegs, node);
3411 }
3412
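// Both paths delegate the prototype-chain walk to compileInstanceOfForObject; the Untyped path
// first short-circuits non-cell values to false.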
3413 void SpeculativeJIT::compileInstanceOf(Node* node)
3414 {
3415     if (node->child1().useKind() == UntypedUse) {
3416         // It might not be a cell. Speculate less aggressively.
3417         // Or: it might only be used once (i.e. by us), so we get zero benefit
3418         // from speculating any more aggressively than we absolutely need to.
3419         
3420         JSValueOperand value(this, node->child1());
3421         SpeculateCellOperand prototype(this, node->child2());
3422         GPRTemporary scratch(this);
3423         GPRTemporary scratch2(this);
3424         GPRTemporary scratch3(this);
3425         
3426         GPRReg prototypeReg = prototype.gpr();
3427         GPRReg scratchReg = scratch.gpr();
3428         GPRReg scratch2Reg = scratch2.gpr();
3429         GPRReg scratch3Reg = scratch3.gpr();
3430         
3431         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
3432         GPRReg valueReg = value.jsValueRegs().payloadGPR();
3433         moveFalseTo(scratchReg);
3434
3435         MacroAssembler::Jump done = m_jit.jump();
3436         
3437         isCell.link(&m_jit);
3438         
3439         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg, scratch3Reg);
3440         
3441         done.link(&m_jit);
3442
3443         blessedBooleanResult(scratchReg, node);
3444         return;
3445     }
3446     
3447     SpeculateCellOperand value(this, node->child1());
3448     SpeculateCellOperand prototype(this, node->child2());
3449     
3450     GPRTemporary scratch(this);
3451     GPRTemporary scratch2(this);
3452     GPRTemporary scratch3(this);
3453     
3454     GPRReg valueReg = value.gpr();
3455     GPRReg prototypeReg = prototype.gpr();
3456     GPRReg scratchReg = scratch.gpr();
3457     GPRReg scratch2Reg = scratch2.gpr();
3458     GPRReg scratch3Reg = scratch3.gpr();
3459     
3460     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg, scratch3Reg);
3461
3462     blessedBooleanResult(scratchReg, node);
3463 }
3464
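// Generic path for bitwise ops with untyped operands. If either side is known not to be a number
// we just call the slow-path operation. Otherwise we emit the snippet generator's fast path and,
// on the slow path, spill registers, re-materialize a constant operand into the result registers,
// and call the operation.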
3465 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3466 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3467 {
3468     Edge& leftChild = node->child1();
3469     Edge& rightChild = node->child2();
3470
3471     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3472         JSValueOperand left(this, leftChild);
3473         JSValueOperand right(this, rightChild);
3474         JSValueRegs leftRegs = left.jsValueRegs();
3475         JSValueRegs rightRegs = right.jsValueRegs();
3476
3477         flushRegisters();
3478         JSValueRegsFlushedCallResult result(this);
3479         JSValueRegs resultRegs = result.regs();
3480         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3481         m_jit.exceptionCheck();
3482
3483         jsValueResult(resultRegs, node);
3484         return;
3485     }
3486
3487     std::optional<JSValueOperand> left;
3488     std::optional<JSValueOperand> right;
3489
3490     JSValueRegs leftRegs;
3491     JSValueRegs rightRegs;
3492
3493 #if USE(JSVALUE64)
3494     GPRTemporary result(this);
3495     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3496     GPRTemporary scratch(this);
3497     GPRReg scratchGPR = scratch.gpr();
3498 #else
3499     GPRTemporary resultTag(this);
3500     GPRTemporary resultPayload(this);
3501     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3502     GPRReg scratchGPR = resultTag.gpr();
3503 #endif
3504
3505     SnippetOperand leftOperand;
3506     SnippetOperand rightOperand;
3507
3508     // The snippet generator does not support both operands being constant. If the left
3509     // operand is already const, we'll ignore the right operand's constness.
3510     if (leftChild->isInt32Constant())
3511         leftOperand.setConstInt32(leftChild->asInt32());
3512     else if (rightChild->isInt32Constant())
3513         rightOperand.setConstInt32(rightChild->asInt32());
3514
3515     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3516
3517     if (!leftOperand.isConst()) {
3518         left.emplace(this, leftChild);
3519         leftRegs = left->jsValueRegs();
3520     }
3521     if (!rightOperand.isConst()) {
3522         right.emplace(this, rightChild);
3523         rightRegs = right->jsValueRegs();
3524     }
3525
3526     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3527     gen.generateFastPath(m_jit);
3528
3529     ASSERT(gen.didEmitFastPath());
3530     gen.endJumpList().append(m_jit.jump());
3531
3532     gen.slowPathJumpList().link(&m_jit);
3533     silentSpillAllRegisters(resultRegs);
3534
3535     if (leftOperand.isConst()) {
3536         leftRegs = resultRegs;
3537         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3538     } else if (rightOperand.isConst()) {
3539         rightRegs = resultRegs;
3540         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3541     }
3542
3543     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3544
3545     silentFillAllRegisters();
3546     m_jit.exceptionCheck();
3547
3548     gen.endJumpList().link(&m_jit);
3549     jsValueResult(resultRegs, node);
3550 }
3551
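// Int32 bitwise and/or/xor, folding one int32-constant operand when possible; untyped operands are
// handled by emitUntypedBitOp.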
3552 void SpeculativeJIT::compileBitwiseOp(Node* node)
3553 {
3554     NodeType op = node->op();
3555     Edge& leftChild = node->child1();
3556     Edge& rightChild = node->child2();
3557
3558     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3559         switch (op) {
3560         case BitAnd:
3561             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3562             return;
3563         case BitOr:
3564             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3565             return;
3566         case BitXor:
3567             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3568             return;
3569         default:
3570             RELEASE_ASSERT_NOT_REACHED();
3571         }
3572     }
3573
3574     if (leftChild->isInt32Constant()) {
3575         SpeculateInt32Operand op2(this, rightChild);
3576         GPRTemporary result(this, Reuse, op2);
3577
3578         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3579
3580         int32Result(result.gpr(), node);
3581
3582     } else if (rightChild->isInt32Constant()) {
3583         SpeculateInt32Operand op1(this, leftChild);
3584         GPRTemporary result(this, Reuse, op1);
3585
3586         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3587
3588         int32Result(result.gpr(), node);
3589
3590     } else {
3591         SpeculateInt32Operand op1(this, leftChild);
3592         SpeculateInt32Operand op2(this, rightChild);
3593         GPRTemporary result(this, Reuse, op1, op2);
3594         
3595         GPRReg reg1 = op1.gpr();
3596         GPRReg reg2 = op2.gpr();
3597         bitOp(op, reg1, reg2, result.gpr());
3598         
3599         int32Result(result.gpr(), node);
3600     }
3601 }
3602
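// Right-shift variant of emitUntypedBitOp: chooses the signed or unsigned shift (and slow-path
// operation) from the node's op, and additionally supplies the FPR scratch registers that
// JITRightShiftGenerator needs.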
3603 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3604 {
3605     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3606         ? operationValueBitRShift : operationValueBitURShift;
3607     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3608         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3609
3610     Edge& leftChild = node->child1();
3611     Edge& rightChild = node->child2();
3612
3613     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3614         JSValueOperand left(this, leftChild);
3615         JSValueOperand right(this, rightChild);
3616         JSValueRegs leftRegs = left.jsValueRegs();
3617         JSValueRegs rightRegs = right.jsValueRegs();
3618
3619         flushRegisters();
3620         JSValueRegsFlushedCallResult result(this);
3621         JSValueRegs resultRegs = result.regs();
3622         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3623         m_jit.exceptionCheck();
3624
3625         jsValueResult(resultRegs, node);
3626         return;
3627     }
3628
3629     std::optional<JSValueOperand> left;
3630     std::optional<JSValueOperand> right;
3631
3632     JSValueRegs leftRegs;
3633     JSValueRegs rightRegs;
3634
3635     FPRTemporary leftNumber(this);
3636     FPRReg leftFPR = leftNumber.fpr();
3637
3638 #if USE(JSVALUE64)
3639     GPRTemporary result(this);
3640     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3641     GPRTemporary scratch(this);
3642     GPRReg scratchGPR = scratch.gpr();
3643     FPRReg scratchFPR = InvalidFPRReg;
3644 #else
3645     GPRTemporary resultTag(this);
3646     GPRTemporary resultPayload(this);
3647     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3648     GPRReg scratchGPR = resultTag.gpr();
3649     FPRTemporary fprScratch(this);
3650     FPRReg scratchFPR = fprScratch.fpr();
3651 #endif
3652
3653     SnippetOperand leftOperand;
3654     SnippetOperand rightOperand;
3655
3656     // The snippet generator does not support both operands being constant. If the left
3657     // operand is already const, we'll ignore the right operand's constness.
3658     if (leftChild->isInt32Constant())
3659         leftOperand.setConstInt32(leftChild->asInt32());
3660     else if (rightChild->isInt32Constant())
3661         rightOperand.setConstInt32(rightChild->asInt32());
3662
3663     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3664
3665     if (!leftOperand.isConst()) {
3666         left.emplace(this, leftChild);
3667         leftRegs = left->jsValueRegs();
3668     }
3669     if (!rightOperand.isConst()) {
3670         right.emplace(this, rightChild);
3671         rightRegs = right->jsValueRegs();
3672     }
3673
3674     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3675         leftFPR, scratchGPR, scratchFPR, shiftType);
3676     gen.generateFastPath(m_jit);
3677
3678     ASSERT(gen.didEmitFastPath());
3679     gen.endJumpList().append(m_jit.jump());
3680
3681     gen.slowPathJumpList().link(&m_jit);
3682     silentSpillAllRegisters(resultRegs);
3683
3684     if (leftOperand.isConst()) {
3685         leftRegs = resultRegs;
3686         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3687     } else if (rightOperand.isConst()) {
3688         rightRegs = resultRegs;
3689         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3690     }
3691
3692     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3693
3694     silentFillAllRegisters();
3695     m_jit.exceptionCheck();
3696
3697     gen.endJumpList().link(&m_jit);
3698     jsValueResult(resultRegs, node);
3699     return;
3700 }
3701
3702 void SpeculativeJIT::compileShiftOp(Node* node)
3703 {
3704     NodeType op = node->op();
3705     Edge& leftChild = node->child1();
3706     Edge& rightChild = node->child2();
3707
3708     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3709         switch (op) {
3710         case BitLShift:
3711             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3712             return;
3713         case BitRShift:
3714         case BitURShift:
3715             emitUntypedRightShiftBitOp(node);
3716             return;
3717         default:
3718             RELEASE_ASSERT_NOT_REACHED();
3719         }
3720     }
3721
3722     if (rightChild->isInt32Constant()) {
3723         SpeculateInt32Operand op1(this, leftChild);
3724         GPRTemporary result(this, Reuse, op1);
3725
3726         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3727
3728         int32Result(result.gpr(), node);
3729     } else {
3730         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3731         SpeculateInt32Operand op1(this, leftChild);
3732         SpeculateInt32Operand op2(this, rightChild);
3733         GPRTemporary result(this, Reuse, op1);
3734
3735         GPRReg reg1 = op1.gpr();
3736         GPRReg reg2 = op2.gpr();
3737         shiftOp(op, reg1, reg2, result.gpr());
3738
3739         int32Result(result.gpr(), node);
3740     }
3741 }
3742
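// ValueAdd: operands known not to be numbers go straight to operationValueAddNotNumber; everything
// else goes through a JITAddIC created from the baseline ArithProfile, via compileMathIC below.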
3743 void SpeculativeJIT::compileValueAdd(Node* node)
3744 {
3745     Edge& leftChild = node->child1();
3746     Edge& rightChild = node->child2();
3747
3748     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3749         JSValueOperand left(this, leftChild);
3750         JSValueOperand right(this, rightChild);
3751         JSValueRegs leftRegs = left.jsValueRegs();
3752         JSValueRegs rightRegs = right.jsValueRegs();
3753
3754         flushRegisters();
3755         JSValueRegsFlushedCallResult result(this);
3756         JSValueRegs resultRegs = result.regs();
3757         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3758         m_jit.exceptionCheck();
3759     
3760         jsValueResult(resultRegs, node);
3761         return;
3762     }
3763
3764 #if USE(JSVALUE64)
3765     bool needsScratchGPRReg = true;
3766     bool needsScratchFPRReg = false;
3767 #else
3768     bool needsScratchGPRReg = true;
3769     bool needsScratchFPRReg = true;
3770 #endif
3771
3772     ArithProfile* arithProfile = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
3773     JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile);
3774     auto repatchingFunction = operationValueAddOptimize;
3775     auto nonRepatchingFunction = operationValueAdd;
3776     
3777     compileMathIC(node, addIC, needsScratchGPRReg, needsScratchFPRReg, repatchingFunction, nonRepatchingFunction);
3778 }
3779
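// Shared driver for binary math ICs (e.g. ValueAdd): allocates operand and scratch registers,
// attempts mathIC->generateInline(), and, when inline generation succeeds, registers a slow-path
// generator that spills, calls either the repatching or the non-repatching operation (depending on
// shouldSlowPathRepatch), refills, and finally links the inline code in an addLinkTask.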
3780 template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
3781 void SpeculativeJIT::compileMathIC(Node* node, JITBinaryMathIC<Generator>* mathIC, bool needsScratchGPRReg, bool needsScratchFPRReg, RepatchingFunction repatchingFunction, NonRepatchingFunction nonRepatchingFunction)
3782 {
3783     Edge& leftChild = node->child1();
3784     Edge& rightChild = node->child2();
3785
3786     std::optional<JSValueOperand> left;
3787     std::optional<JSValueOperand> right;
3788
3789     JSValueRegs leftRegs;
3790     JSValueRegs rightRegs;
3791
3792     FPRTemporary leftNumber(this);
3793     FPRTemporary rightNumber(this);
3794     FPRReg leftFPR = leftNumber.fpr();
3795     FPRReg rightFPR = rightNumber.fpr();
3796
3797     GPRReg scratchGPR = InvalidGPRReg;
3798     FPRReg scratchFPR = InvalidFPRReg;
3799
3800     std::optional<FPRTemporary> fprScratch;
3801     if (needsScratchFPRReg) {
3802         fprScratch.emplace(this);
3803         scratchFPR = fprScratch->fpr();
3804     }
3805
3806 #if USE(JSVALUE64)
3807     std::optional<GPRTemporary> gprScratch;
3808     if (needsScratchGPRReg) {
3809         gprScratch.emplace(this);
3810         scratchGPR = gprScratch->gpr();
3811     }
3812     GPRTemporary result(this);
3813     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3814 #else
3815     GPRTemporary resultTag(this);
3816     GPRTemporary resultPayload(this);
3817     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3818     if (needsScratchGPRReg)
3819         scratchGPR = resultRegs.tagGPR();
3820 #endif
3821
3822     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3823     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3824
3825     // The snippet generator does not support both operands being constant. If the left
3826     // operand is already const, we'll ignore the right operand's constness.
3827     if (leftChild->isInt32Constant())
3828         leftOperand.setConstInt32(leftChild->asInt32());
3829     else if (rightChild->isInt32Constant())
3830         rightOperand.setConstInt32(rightChild->asInt32());
3831
3832     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3833     ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));
3834
3835     if (!Generator::isLeftOperandValidConstant(leftOperand)) {
3836         left.emplace(this, leftChild);
3837         leftRegs = left->jsValueRegs();
3838     }
3839     if (!Generator::isRightOperandValidConstant(rightOperand)) {
3840         right.emplace(this, rightChild);
3841         rightRegs = right->jsValueRegs();
3842     }
3843
3844 #if ENABLE(MATH_IC_STATS)
3845     auto inlineStart = m_jit.label();
3846 #endif
3847
3848     Box<MathICGenerationState> addICGenerationState = Box<MathICGenerationState>::create();
3849     mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, leftFPR, rightFPR, scratchGPR, scratchFPR);
3850
3851     bool shouldEmitProfiling = false;
3852     bool generatedInline = mathIC->generateInline(m_jit, *addICGenerationState, shouldEmitProfiling);
3853
3854     if (generatedInline) {
3855         ASSERT(!addICGenerationState->slowPathJumps.empty());
3856
3857         Vector<SilentRegisterSavePlan> savePlans;
3858         silentSpillAllRegistersImpl(false, savePlans, resultRegs);
3859
3860         auto done = m_jit.label();
3861
3862         addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
3863             addICGenerationState->slowPathJumps.link(&m_jit);
3864             addICGenerationState->slowPathStart = m_jit.label();
3865 #if ENABLE(MATH_IC_STATS)
3866             auto slowPathStart = m_jit.label();
3867 #endif
3868
3869             silentSpill(savePlans);
3870
3871             auto innerLeftRegs = leftRegs;
3872             auto innerRightRegs = rightRegs;
3873             if (Generator::isLeftOperandValidConstant(leftOperand)) {
3874                 innerLeftRegs = resultRegs;
3875                 m_jit.moveValue(leftChild->asJSValue(), innerLeftRegs);
3876             } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3877                 innerRightRegs = resultRegs;
3878                 m_jit.moveValue(rightChild->asJSValue(), innerRightRegs);
3879             }
3880
3881             if (addICGenerationState->shouldSlowPathRepatch)
3882                 addICGenerationState->slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJMic>(repatchingFunction), resultRegs, innerLeftRegs, innerRightRegs, TrustedImmPtr(mathIC));
3883             else
3884                 addICGenerationState->slowPathCall = callOperation(nonRepatchingFunction, resultRegs, innerLeftRegs, innerRightRegs);
3885
3886             silentFill(savePlans);
3887             m_jit.exceptionCheck();
3888             m_jit.jump().linkTo(done, &m_jit);
3889
3890             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3891                 mathIC->finalizeInlineCode(*addICGenerationState, linkBuffer);
3892             });
3893