Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
/*
 * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "BinarySwitch.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGArrayifySlowPathGenerator.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
#include "DFGCapabilities.h"
#include "DFGDOMJITPatchpointParams.h"
#include "DFGMayExit.h"
#include "DFGOSRExitFuzz.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "DOMJITPatchpoint.h"
#include "DirectArguments.h"
#include "JITAddGenerator.h"
#include "JITBitAndGenerator.h"
#include "JITBitOrGenerator.h"
#include "JITBitXorGenerator.h"
#include "JITDivGenerator.h"
#include "JITLeftShiftGenerator.h"
#include "JITMulGenerator.h"
#include "JITRightShiftGenerator.h"
#include "JITSubGenerator.h"
#include "JSAsyncFunction.h"
#include "JSCInlines.h"
#include "JSEnvironmentRecord.h"
#include "JSFixedArray.h"
#include "JSGeneratorFunction.h"
#include "JSLexicalEnvironment.h"
#include "LinkBuffer.h"
#include "RegExpConstructor.h"
#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include <wtf/BitVector.h>
#include <wtf/Box.h>
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
{
}

SpeculativeJIT::~SpeculativeJIT()
{
}

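// Allocates a JSFinalObject and, when the structure needs one, its butterfly (indexing
// header plus out-of-line capacity) on the fast path, filling unused vector slots with
// holes (PNaN for double arrays). Any allocation failure falls through to a shared slow
// path that calls operationNewRawObject.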
void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
{
    IndexingType indexingType = structure->indexingType();
    bool hasIndexingHeader = hasIndexedProperties(indexingType);

    unsigned inlineCapacity = structure->inlineCapacity();
    unsigned outOfLineCapacity = structure->outOfLineCapacity();
    
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    ASSERT(vectorLength >= numElements);
    vectorLength = Butterfly::optimalContiguousVectorLength(structure.get(), vectorLength);
    
    JITCompiler::JumpList slowCases;

    size_t size = 0;
    if (hasIndexingHeader)
        size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
    size += outOfLineCapacity * sizeof(JSValue);

    m_jit.move(TrustedImmPtr(0), storageGPR);
    
    if (size) {
        if (MarkedAllocator* allocator = m_jit.vm()->auxiliarySpace.allocatorFor(size)) {
            m_jit.move(TrustedImmPtr(allocator), scratchGPR);
            m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);
            
            m_jit.addPtr(
                TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
                storageGPR);
            
            if (hasIndexingHeader)
                m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
        } else
            slowCases.append(m_jit.jump());
    }

    size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
    MarkedAllocator* allocatorPtr = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorFor(allocationSize);
    if (allocatorPtr) {
        m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
        emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
        m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
    } else
        slowCases.append(m_jit.jump());

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
        slowCases, this, operationNewRawObject, resultGPR, storageGPR,
        structure, vectorLength));

    if (numElements < vectorLength) {
#if USE(JSVALUE64)
        if (hasDouble(structure->indexingType()))
            m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        else
            m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        if (hasDouble(structure->indexingType()))
            value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        else
            value.asInt64 = JSValue::encode(JSValue());
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }
    
    if (hasIndexingHeader)
        m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    
    m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());
    
    m_jit.mutatorFence();
}

void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
{
    if (inlineCallFrame && !inlineCallFrame->isVarargs())
        m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
    else {
        VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
        m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
        if (!includeThis)
            m_jit.sub32(TrustedImm32(1), lengthGPR);
    }
}

void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
{
    emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
}

void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
{
    if (origin.inlineCallFrame) {
        if (origin.inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
                calleeGPR);
        } else {
            m_jit.move(
                TrustedImmPtr::weakPointer(m_jit.graph(), origin.inlineCallFrame->calleeRecovery.constant().asCell()),
                calleeGPR);
        }
    } else
        m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
}

void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
{
    m_jit.addPtr(
        TrustedImm32(
            JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
        GPRInfo::callFrameRegister, startGPR);
}

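// OSR exit fuzzing support: bumps the global check counter and returns a jump that fires
// once the configured fireOSRExitFuzzAt / fireOSRExitFuzzAtOrAfter threshold is reached.
// Returns an unset Jump when fuzzing is disabled.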
MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
{
    if (!Options::useOSRExitFuzz()
        || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
        || !doOSRExitFuzzing())
        return MacroAssembler::Jump();
    
    MacroAssembler::Jump result;
    
    m_jit.pushToSave(GPRInfo::regT0);
    m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
    m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
    m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
    unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
    unsigned at = Options::fireOSRExitFuzzAt();
    if (at || atOrAfter) {
        unsigned threshold;
        MacroAssembler::RelationalCondition condition;
        if (atOrAfter) {
            threshold = atOrAfter;
            condition = MacroAssembler::Below;
        } else {
            threshold = at;
            condition = MacroAssembler::NotEqual;
        }
        MacroAssembler::Jump ok = m_jit.branch32(
            condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
        m_jit.popToRestore(GPRInfo::regT0);
        result = m_jit.jump();
        ok.link(&m_jit);
    }
    m_jit.popToRestore(GPRInfo::regT0);
    
    return result;
}

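// The speculationCheck() family records an OSR exit for the current node: the supplied
// jump(s) become the exit branches, and a matching OSRExit entry (capturing the current
// variable event stream position) is appended to the JIT code.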
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList jumpsToFail;
        jumpsToFail.append(fuzzJump);
        jumpsToFail.append(jumpToFail);
        m_jit.appendExitInfo(jumpsToFail);
    } else
        m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList myJumpsToFail;
        myJumpsToFail.append(jumpsToFail);
        myJumpsToFail.append(fuzzJump);
        m_jit.appendExitInfo(myJumpsToFail);
    } else
        m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    return speculationCheck(kind, jsValueSource, nodeUse.node());
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}

void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}

void SpeculativeJIT::unreachable(Node* node)
{
    m_compileOkay = false;
    m_jit.abortWithReason(DFGUnreachableNode, node->op());
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}

void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(exitKind, source, edge.node(), jumpToFail);
}

RegisterSet SpeculativeJIT::usedRegisters()
{
    RegisterSet result;
    
    for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
        GPRReg gpr = GPRInfo::toRegister(i);
        if (m_gprs.isInUse(gpr))
            result.set(gpr);
    }
    for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
        FPRReg fpr = FPRInfo::toRegister(i);
        if (m_fprs.isInUse(fpr))
            result.set(fpr);
    }
    
    result.merge(RegisterSet::stubUnavailableRegisters());
    
    return result;
}

void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(WTFMove(slowPathGenerator));
}

void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
{
    m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
}

void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
{
    for (auto& slowPathGenerator : m_slowPathGenerators) {
        pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
        slowPathGenerator->generate(this);
    }
    for (auto& slowPathLambda : m_slowPathLambdas) {
        Node* currentNode = slowPathLambda.currentNode;
        m_currentNode = currentNode;
        m_outOfLineStreamIndex = slowPathLambda.streamIndex;
        pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
        slowPathLambda.generator();
        m_outOfLineStreamIndex = std::nullopt;
    }
}

void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned i = 0; i < m_generationInfo.size(); ++i)
        m_generationInfo[i] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}

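// Silent spill/fill plans describe how to save a live register around a call and restore
// it afterwards without perturbing the recorded register allocation state. The chosen
// actions depend on the value's current DataFormat and on whether it can simply be
// rematerialized from a constant.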
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);
        
    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }
        
    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }
        
    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
    
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }
        
#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
    
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
    
void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        ASSERT(plan.node()->constant()->value().isCell());
        m_jit.move(TrustedImmPtr(plan.node()->constant()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
    
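// Given the indexing type byte already loaded into tempGPR, returns the jump(s) taken when
// the cell's indexing shape (and, where relevant, its IsArray bit) does not match what the
// ArrayMode expects.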
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        CRASH();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
#endif
    }
        
    case Array::Array:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
        
    case Array::NonArray:
    case Array::OriginalNonArray:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
        
    case Array::PossiblyArray:
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }
    
    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}

JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;
    
    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::Undecided:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
        
        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }
    
    return result;
}

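// Emits the indexing-type or cell-type check required by the node's ArrayMode, exiting via
// BadIndexingType or BadType when the base does not match. Does nothing if the abstract
// state already proves the check.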
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());
    
    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();
    
    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }
    
    const ClassInfo* expectedClassInfo = 0;
    
    switch (node->arrayMode().type()) {
    case Array::AnyTypedArray:
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::Undecided:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
        
        noResult(m_currentNode);
        return;
    }
    case Array::DirectArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
        noResult(m_currentNode);
        return;
    case Array::ScopedArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
        noResult(m_currentNode);
        return;
    default:
        speculateCellTypeWithoutTypeFiltering(
            node->child1(), baseReg,
            typeForTypedArrayType(node->arrayMode().typedArrayType()));
        noResult(m_currentNode);
        return;
    }
    
    RELEASE_ASSERT(expectedClassInfo);
    
    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            TrustedImmPtr(expectedClassInfo)));
    
    noResult(m_currentNode);
}

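// Converts the base object's storage to the shape required by the ArrayMode. The fast path
// only checks that the object already has the desired structure or indexing shape; the
// actual conversion happens in ArrayifySlowPathGenerator.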
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());
    
    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;
    
    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }
        
    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;
    
    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
        
        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }
    
    addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
    
    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    
    SpeculateCellOperand base(this, node->child1());
    
    if (!node->child2()) {
        arrayify(node, base.gpr(), InvalidGPRReg);
        return;
    }
    
    SpeculateInt32Operand property(this, node->child2());
    
    arrayify(node, base.gpr(), property.gpr());
}

GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
    
    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }
        
        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }
        
    case DataFormatStorage: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }
        
    default:
        return fillSpeculateCell(edge);
    }
}

void SpeculativeJIT::useChildren(Node* node)
{
    if (node->flags() & NodeHasVarArgs) {
        for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
            if (!!m_jit.graph().m_varArgChildren[childIdx])
                use(m_jit.graph().m_varArgChildren[childIdx]);
        }
    } else {
        Edge child1 = node->child1();
        if (!child1) {
            ASSERT(!node->child2() && !node->child3());
            return;
        }
        use(child1);
        
        Edge child2 = node->child2();
        if (!child2) {
            ASSERT(!node->child3());
            return;
        }
        use(child2);
        
        Edge child3 = node->child3();
        if (!child3)
            return;
        use(child3);
    }
}

void SpeculativeJIT::compileTryGetById(Node* node)
{
    switch (node->child1().useKind()) {
    case CellUse: {
        SpeculateCellOperand base(this, node->child1());
        JSValueRegsTemporary result(this, Reuse, base);

        JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
        JSValueRegs resultRegs = result.regs();

        base.use();

        cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);

        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        break;
    }

    case UntypedUse: {
        JSValueOperand base(this, node->child1());
        JSValueRegsTemporary result(this, Reuse, base);

        JSValueRegs baseRegs = base.jsValueRegs();
        JSValueRegs resultRegs = result.regs();

        base.use();

        JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);

        cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);

        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        break;
    }

    default:
        DFG_CRASH(m_jit.graph(), node, "Bad use kind");
        break;
    }
}

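// Compiles the 'in' operator. When the property is a constant atomic string, an inline
// cache backed by operationInOptimize is emitted; otherwise we call operationGenericIn.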
void SpeculativeJIT::compileIn(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    GPRReg baseGPR = base.gpr();
    
    if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(*m_jit.vm())) {
        if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
            
            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child2());
            
            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();
            
            // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
            // we can cast it to const AtomicStringImpl* safely.
            auto slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
            
            stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
#if USE(JSVALUE32_64)
            stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
            stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
#endif
            stubInfo->patch.usedRegisters = usedRegisters();

            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(WTFMove(slowPath));

            base.use();

            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }

    JSValueOperand key(this, node->child2());
    JSValueRegs regs = key.jsValueRegs();
        
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
        
    base.use();
    key.use();
        
    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    m_jit.exceptionCheck();
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

void SpeculativeJIT::compileDeleteById(Node* node)
{
    JSValueOperand value(this, node->child1());
    GPRFlushedCallResult result(this);

    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg resultGPR = result.gpr();

    value.use();

    flushRegisters();
    callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
    m_jit.exceptionCheck();

    unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

void SpeculativeJIT::compileDeleteByVal(Node* node)
{
    JSValueOperand base(this, node->child1());
    JSValueOperand key(this, node->child2());
    GPRFlushedCallResult result(this);

    JSValueRegs baseRegs = base.jsValueRegs();
    JSValueRegs keyRegs = key.jsValueRegs();
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
    m_jit.exceptionCheck();

    unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);
        
        nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
    
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        
        return true;
    }
    
    nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
    
    return false;
}

bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);
        
        nonSpeculativePeepholeStrictEq(node, branchNode, invert);
    
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        
        return true;
    }
    
    nonSpeculativeNonPeepholeStrictEq(node, invert);
    
    return false;
}

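// Maps a DataFormat value to a fixed-width label used by SpeculativeJIT::dump() below.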
static const char* dataFormatString(DataFormat format)
{
    // These values correspond to the DataFormat enum.
    const char* strings[] = {
        "[  ]",
        "[ i]",
        "[ d]",
        "[ c]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
        "[J ]",
        "[Ji]",
        "[Jd]",
        "[Jc]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
    };
    return strings[format];
}

void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}

GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate(specific);
}

#if USE(JSVALUE32_64)
GPRTemporary::GPRTemporary(
    SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr(which));
    else
        m_gpr = m_jit->allocate();
}
#endif // USE(JSVALUE32_64)

JSValueRegsTemporary::JSValueRegsTemporary() { }

JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}

#if USE(JSVALUE64)
template<typename T>
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
    : m_gpr(jit, Reuse, operand)
{
}
#else
template<typename T>
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
{
    if (resultWord == PayloadWord) {
        m_payloadGPR = GPRTemporary(jit, Reuse, operand);
        m_tagGPR = GPRTemporary(jit);
    } else {
        m_payloadGPR = GPRTemporary(jit);
        m_tagGPR = GPRTemporary(jit, Reuse, operand);
    }
}
#endif

#if USE(JSVALUE64)
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
{
    m_gpr = GPRTemporary(jit, Reuse, operand);
}
#else
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
{
    if (jit->canReuse(operand.node())) {
        m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
        m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
    } else {
        m_payloadGPR = GPRTemporary(jit);
        m_tagGPR = GPRTemporary(jit);
    }
}
#endif

JSValueRegsTemporary::~JSValueRegsTemporary() { }

JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}

void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}

FPRTemporary::FPRTemporary(FPRTemporary&& other)
{
    ASSERT(other.m_jit);
    ASSERT(other.m_fpr != InvalidFPRReg);
    m_jit = other.m_jit;
    m_fpr = other.m_fpr;

    other.m_jit = nullptr;
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else if (m_jit->canReuse(op2.node()))
        m_fpr = m_jit->reuse(op2.fpr());
    else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

#if USE(JSVALUE32_64)
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (op1.isDouble() && m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
#endif

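// The compilePeepHole* helpers fuse a compare node with the Branch that consumes it: they
// emit a single conditional branch to the taken block, inverting the condition when the
// taken block is the fall-through successor.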
1393 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1394 {
1395     BasicBlock* taken = branchNode->branchData()->taken.block;
1396     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1397
1398     if (taken == nextBlock()) {
1399         condition = MacroAssembler::invert(condition);
1400         std::swap(taken, notTaken);
1401     }
1402
1403     SpeculateDoubleOperand op1(this, node->child1());
1404     SpeculateDoubleOperand op2(this, node->child2());
1405     
1406     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1407     jump(notTaken);
1408 }
1409
1410 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1411 {
1412     BasicBlock* taken = branchNode->branchData()->taken.block;
1413     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1414
1415     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1416     
1417     if (taken == nextBlock()) {
1418         condition = MacroAssembler::NotEqual;
1419         BasicBlock* tmp = taken;
1420         taken = notTaken;
1421         notTaken = tmp;
1422     }
1423
1424     SpeculateCellOperand op1(this, node->child1());
1425     SpeculateCellOperand op2(this, node->child2());
1426     
1427     GPRReg op1GPR = op1.gpr();
1428     GPRReg op2GPR = op2.gpr();
1429     
1430     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1431         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1432             speculationCheck(
1433                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1434         }
1435         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1436             speculationCheck(
1437                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1438         }
1439     } else {
1440         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1441             speculationCheck(
1442                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1443                 m_jit.branchIfNotObject(op1GPR));
1444         }
1445         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1446             m_jit.branchTest8(
1447                 MacroAssembler::NonZero, 
1448                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1449                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1450
1451         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1452             speculationCheck(
1453                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1454                 m_jit.branchIfNotObject(op2GPR));
1455         }
1456         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1457             m_jit.branchTest8(
1458                 MacroAssembler::NonZero, 
1459                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1460                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1461     }
1462
1463     branchPtr(condition, op1GPR, op2GPR, taken);
1464     jump(notTaken);
1465 }
1466
1467 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1468 {
1469     BasicBlock* taken = branchNode->branchData()->taken.block;
1470     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1471
1472     // The branch instruction will branch to the taken block.
1473     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
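    // For example, if this compare feeds a Branch whose taken block is emitted immediately
    // after the current one, we flip the condition and branch to the original notTaken block
    // instead, so the common case simply falls through into the next block.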
1474     if (taken == nextBlock()) {
1475         condition = JITCompiler::invert(condition);
1476         BasicBlock* tmp = taken;
1477         taken = notTaken;
1478         notTaken = tmp;
1479     }
1480
1481     if (node->child1()->isInt32Constant()) {
1482         int32_t imm = node->child1()->asInt32();
1483         SpeculateBooleanOperand op2(this, node->child2());
1484         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1485     } else if (node->child2()->isInt32Constant()) {
1486         SpeculateBooleanOperand op1(this, node->child1());
1487         int32_t imm = node->child2()->asInt32();
1488         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1489     } else {
1490         SpeculateBooleanOperand op1(this, node->child1());
1491         SpeculateBooleanOperand op2(this, node->child2());
1492         branch32(condition, op1.gpr(), op2.gpr(), taken);
1493     }
1494
1495     jump(notTaken);
1496 }
1497
1498 void SpeculativeJIT::compileToLowerCase(Node* node)
1499 {
1500     ASSERT(node->op() == ToLowerCase);
1501     SpeculateCellOperand string(this, node->child1());
1502     GPRTemporary temp(this);
1503     GPRTemporary index(this);
1504     GPRTemporary charReg(this);
1505     GPRTemporary length(this);
1506
1507     GPRReg stringGPR = string.gpr();
1508     GPRReg tempGPR = temp.gpr();
1509     GPRReg indexGPR = index.gpr();
1510     GPRReg charGPR = charReg.gpr();
1511     GPRReg lengthGPR = length.gpr();
1512
1513     speculateString(node->child1(), stringGPR);
1514
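    // Fast path: walk an 8-bit, already-resolved string and prove that every character is
    // ASCII with no uppercase letter, in which case the argument string itself is the result.
    // Ropes, 16-bit strings, non-ASCII characters, and any character in 'A'..'Z' bail to
    // operationToLowerCase, passing the index at which scanning stopped.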
1515     CCallHelpers::JumpList slowPath;
1516
1517     m_jit.move(TrustedImmPtr(0), indexGPR);
1518
1519     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1520     slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
1521
1522     slowPath.append(m_jit.branchTest32(
1523         MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1524         MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1525     m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1526     m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1527
1528     auto loopStart = m_jit.label();
1529     auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1530     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1531     slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1532     m_jit.sub32(TrustedImm32('A'), charGPR);
1533     slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1534
1535     m_jit.add32(TrustedImm32(1), indexGPR);
1536     m_jit.jump().linkTo(loopStart, &m_jit);
1537     
1538     slowPath.link(&m_jit);
1539     silentSpillAllRegisters(lengthGPR);
1540     callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1541     silentFillAllRegisters(lengthGPR);
1542     m_jit.exceptionCheck();
1543     auto done = m_jit.jump();
1544
1545     loopDone.link(&m_jit);
1546     m_jit.move(stringGPR, lengthGPR);
1547
1548     done.link(&m_jit);
1549     cellResult(lengthGPR, node);
1550 }
1551
1552 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1553 {
1554     BasicBlock* taken = branchNode->branchData()->taken.block;
1555     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1556
1557     // The branch instruction will branch to the taken block.
1558     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1559     if (taken == nextBlock()) {
1560         condition = JITCompiler::invert(condition);
1561         BasicBlock* tmp = taken;
1562         taken = notTaken;
1563         notTaken = tmp;
1564     }
1565
1566     if (node->child1()->isInt32Constant()) {
1567         int32_t imm = node->child1()->asInt32();
1568         SpeculateInt32Operand op2(this, node->child2());
1569         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1570     } else if (node->child2()->isInt32Constant()) {
1571         SpeculateInt32Operand op1(this, node->child1());
1572         int32_t imm = node->child2()->asInt32();
1573         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1574     } else {
1575         SpeculateInt32Operand op1(this, node->child1());
1576         SpeculateInt32Operand op2(this, node->child2());
1577         branch32(condition, op1.gpr(), op2.gpr(), taken);
1578     }
1579
1580     jump(notTaken);
1581 }
1582
1583 // Returns true if the compare is fused with a subsequent branch.
1584 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1585 {
1586     // Fused compare & branch.
1587     unsigned branchIndexInBlock = detectPeepHoleBranch();
1588     if (branchIndexInBlock != UINT_MAX) {
1589         Node* branchNode = m_block->at(branchIndexInBlock);
1590
1591         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1592         // so there can be no intervening nodes that also reference the compare.
1593         ASSERT(node->adjustedRefCount() == 1);
1594
1595         if (node->isBinaryUseKind(Int32Use))
1596             compilePeepHoleInt32Branch(node, branchNode, condition);
1597 #if USE(JSVALUE64)
1598         else if (node->isBinaryUseKind(Int52RepUse))
1599             compilePeepHoleInt52Branch(node, branchNode, condition);
1600 #endif // USE(JSVALUE64)
1601         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1602             // Use non-peephole comparison, for now.
1603             return false;
1604         } else if (node->isBinaryUseKind(DoubleRepUse))
1605             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1606         else if (node->op() == CompareEq) {
1607             if (node->isBinaryUseKind(BooleanUse))
1608                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1609             else if (node->isBinaryUseKind(SymbolUse))
1610                 compilePeepHoleSymbolEquality(node, branchNode);
1611             else if (node->isBinaryUseKind(ObjectUse))
1612                 compilePeepHoleObjectEquality(node, branchNode);
1613             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1614                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1615             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1616                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1617             else if (!needsTypeCheck(node->child1(), SpecOther))
1618                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1619             else if (!needsTypeCheck(node->child2(), SpecOther))
1620                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1621             else {
1622                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1623                 return true;
1624             }
1625         } else {
1626             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1627             return true;
1628         }
1629
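        // The compare and the branch have now been emitted as a single fused sequence.
        // Consume the compare's children and advance to the branch node so the main compile
        // loop does not emit the Branch a second time.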
1630         use(node->child1());
1631         use(node->child2());
1632         m_indexInBlock = branchIndexInBlock;
1633         m_currentNode = branchNode;
1634         return true;
1635     }
1636     return false;
1637 }
1638
1639 void SpeculativeJIT::noticeOSRBirth(Node* node)
1640 {
1641     if (!node->hasVirtualRegister())
1642         return;
1643     
1644     VirtualRegister virtualRegister = node->virtualRegister();
1645     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1646     
1647     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1648 }
1649
1650 void SpeculativeJIT::compileMovHint(Node* node)
1651 {
1652     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1653     
1654     Node* child = node->child1().node();
1655     noticeOSRBirth(child);
1656     
1657     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1658 }
1659
1660 void SpeculativeJIT::bail(AbortReason reason)
1661 {
1662     if (verboseCompilationEnabled())
1663         dataLog("Bailing compilation.\n");
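    // Note: abortWithReason() plants a crash at the current point in the generated code, so
    // anything emitted after a bail is unreachable at run time. Resetting m_compileOkay here
    // presumably lets the remaining blocks be walked without repeatedly tripping the failure.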
1664     m_compileOkay = true;
1665     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1666     clearGenerationInfo();
1667 }
1668
1669 void SpeculativeJIT::compileCurrentBlock()
1670 {
1671     ASSERT(m_compileOkay);
1672     
1673     if (!m_block)
1674         return;
1675     
1676     ASSERT(m_block->isReachable);
1677     
1678     m_jit.blockHeads()[m_block->index] = m_jit.label();
1679
1680     if (!m_block->intersectionOfCFAHasVisited) {
1681         // Don't generate code for basic blocks that are unreachable according to CFA.
1682         // But to be sure that nobody has generated a jump to this block, drop in a
1683         // breakpoint here.
1684         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1685         return;
1686     }
1687
1688     m_stream->appendAndLog(VariableEvent::reset());
1689     
1690     m_jit.jitAssertHasValidCallFrame();
1691     m_jit.jitAssertTagsInPlace();
1692     m_jit.jitAssertArgumentCountSane();
1693
1694     m_state.reset();
1695     m_state.beginBasicBlock(m_block);
1696     
1697     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1698         int operand = m_block->variablesAtHead.operandForIndex(i);
1699         Node* node = m_block->variablesAtHead[i];
1700         if (!node)
1701             continue; // No need to record dead SetLocals.
1702         
1703         VariableAccessData* variable = node->variableAccessData();
1704         DataFormat format;
1705         if (!node->refCount())
1706             continue; // No need to record dead SetLocals.
1707         format = dataFormatFor(variable->flushFormat());
1708         m_stream->appendAndLog(
1709             VariableEvent::setLocal(
1710                 VirtualRegister(operand),
1711                 variable->machineLocal(),
1712                 format));
1713     }
1714
1715     m_origin = NodeOrigin();
1716     
1717     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1718         m_currentNode = m_block->at(m_indexInBlock);
1719         
1720         // We may have hit a contradiction that the CFA was aware of but that the JIT
1721         // didn't cause directly.
1722         if (!m_state.isValid()) {
1723             bail(DFGBailedAtTopOfBlock);
1724             return;
1725         }
1726
1727         m_interpreter.startExecuting();
1728         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1729         m_jit.setForNode(m_currentNode);
1730         m_origin = m_currentNode->origin;
1731         if (validationEnabled())
1732             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1733         m_lastGeneratedNode = m_currentNode->op();
1734         
1735         ASSERT(m_currentNode->shouldGenerate());
1736         
1737         if (verboseCompilationEnabled()) {
1738             dataLogF(
1739                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1740                 (int)m_currentNode->index(),
1741                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1742             dataLog("\n");
1743         }
1744
1745         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1746             m_jit.jitReleaseAssertNoException();
1747
1748         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1749
1750         compile(m_currentNode);
1751         
1752         if (belongsInMinifiedGraph(m_currentNode->op()))
1753             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1754         
1755 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1756         m_jit.clearRegisterAllocationOffsets();
1757 #endif
1758         
1759         if (!m_compileOkay) {
1760             bail(DFGBailedAtEndOfNode);
1761             return;
1762         }
1763         
1764         // Make sure that the abstract state is rematerialized for the next node.
1765         m_interpreter.executeEffects(m_indexInBlock);
1766     }
1767     
1768     // Perform the most basic verification that children have been used correctly.
1769     if (!ASSERT_DISABLED) {
1770         for (auto& info : m_generationInfo)
1771             RELEASE_ASSERT(!info.alive());
1772     }
1773 }
1774
1775 // If we are making type predictions about our arguments then
1776 // we need to check that they are correct on function entry.
1777 void SpeculativeJIT::checkArgumentTypes()
1778 {
1779     ASSERT(!m_currentNode);
1780     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1781
1782     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1783         Node* node = m_jit.graph().m_arguments[i];
1784         if (!node) {
1785             // The argument is dead. We don't do any checks for such arguments.
1786             continue;
1787         }
1788         
1789         ASSERT(node->op() == SetArgument);
1790         ASSERT(node->shouldGenerate());
1791
1792         VariableAccessData* variableAccessData = node->variableAccessData();
1793         FlushFormat format = variableAccessData->flushFormat();
1794         
1795         if (format == FlushedJSValue)
1796             continue;
1797         
1798         VirtualRegister virtualRegister = variableAccessData->local();
1799
1800         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1801         
1802 #if USE(JSVALUE64)
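        // These checks exploit the JSVALUE64 encoding: a boxed int32 has the tagTypeNumber
        // bits set (e.g. the int32 5 is boxed as 0xFFFF000000000005), so it compares
        // unsigned-above-or-equal to tagTypeNumberRegister; cells are plain pointers with no
        // tag bits, so testing against tagMaskRegister yields zero; booleans are ValueFalse
        // (0x6) or ValueTrue (0x7), which differ only in the low bit, hence the xor with
        // ValueFalse followed by a test against ~1.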
1803         switch (format) {
1804         case FlushedInt32: {
1805             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1806             break;
1807         }
1808         case FlushedBoolean: {
1809             GPRTemporary temp(this);
1810             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1811             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1812             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1813             break;
1814         }
1815         case FlushedCell: {
1816             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1817             break;
1818         }
1819         default:
1820             RELEASE_ASSERT_NOT_REACHED();
1821             break;
1822         }
1823 #else
1824         switch (format) {
1825         case FlushedInt32: {
1826             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1827             break;
1828         }
1829         case FlushedBoolean: {
1830             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1831             break;
1832         }
1833         case FlushedCell: {
1834             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1835             break;
1836         }
1837         default:
1838             RELEASE_ASSERT_NOT_REACHED();
1839             break;
1840         }
1841 #endif
1842     }
1843
1844     m_origin = NodeOrigin();
1845 }
1846
1847 bool SpeculativeJIT::compile()
1848 {
1849     checkArgumentTypes();
1850     
1851     ASSERT(!m_currentNode);
1852     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1853         m_jit.setForBlockIndex(blockIndex);
1854         m_block = m_jit.graph().block(blockIndex);
1855         compileCurrentBlock();
1856     }
1857     linkBranches();
1858     return true;
1859 }
1860
1861 void SpeculativeJIT::createOSREntries()
1862 {
1863     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1864         BasicBlock* block = m_jit.graph().block(blockIndex);
1865         if (!block)
1866             continue;
1867         if (!block->isOSRTarget)
1868             continue;
1869         
1870         // Currently we don't have OSR entry trampolines. We could add them
1871         // here if need be.
1872         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1873     }
1874 }
1875
1876 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1877 {
1878     unsigned osrEntryIndex = 0;
1879     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1880         BasicBlock* block = m_jit.graph().block(blockIndex);
1881         if (!block)
1882             continue;
1883         if (!block->isOSRTarget)
1884             continue;
1885         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1886     }
1887     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1888     
1889     if (verboseCompilationEnabled()) {
1890         DumpContext dumpContext;
1891         dataLog("OSR Entries:\n");
1892         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1893             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1894         if (!dumpContext.isEmpty())
1895             dumpContext.dump(WTF::dataFile());
1896     }
1897 }
1898     
1899 void SpeculativeJIT::compileCheckTraps(Node*)
1900 {
1901     ASSERT(Options::usePollingTraps());
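    // With polling traps enabled, each CheckTraps node tests the VM's "need trap handling"
    // flag and calls operationHandleTraps on the slow path. CheckTraps produces no result;
    // the temporary only provides a result register for the slow path call.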
1902     GPRTemporary unused(this);
1903     GPRReg unusedGPR = unused.gpr();
1904
1905     JITCompiler::Jump needTrapHandling = m_jit.branchTest8(JITCompiler::NonZero,
1906         JITCompiler::AbsoluteAddress(m_jit.vm()->needTrapHandlingAddress()));
1907
1908     addSlowPathGenerator(slowPathCall(needTrapHandling, this, operationHandleTraps, unusedGPR));
1909 }
1910
1911 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1912 {
1913     Edge child3 = m_jit.graph().varArgChild(node, 2);
1914     Edge child4 = m_jit.graph().varArgChild(node, 3);
1915
1916     ArrayMode arrayMode = node->arrayMode();
1917     
1918     GPRReg baseReg = base.gpr();
1919     GPRReg propertyReg = property.gpr();
1920     
1921     SpeculateDoubleOperand value(this, child3);
1922
1923     FPRReg valueReg = value.fpr();
1924     
1925     DFG_TYPE_CHECK(
1926         JSValueRegs(), child3, SpecFullRealNumber,
1927         m_jit.branchDouble(
1928             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1929     
1930     if (!m_compileOkay)
1931         return;
1932     
1933     StorageOperand storage(this, child4);
1934     GPRReg storageReg = storage.gpr();
1935
1936     if (node->op() == PutByValAlias) {
1937         // Store the value to the array.
1938         GPRReg propertyReg = property.gpr();
1939         FPRReg valueReg = value.fpr();
1940         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1941         
1942         noResult(m_currentNode);
1943         return;
1944     }
1945     
1946     GPRTemporary temporary;
1947     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1948
1949     MacroAssembler::Jump slowCase;
1950     
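    // Bounds handling: in-bounds stores only check against the public length. Otherwise, a
    // store at an index below the vector length grows the public length by one; an index at
    // or beyond the vector length either OSR exits or, if the array mode allows out-of-bounds
    // stores, takes the slow path call emitted below.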
1951     if (arrayMode.isInBounds()) {
1952         speculationCheck(
1953             OutOfBounds, JSValueRegs(), 0,
1954             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1955     } else {
1956         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1957         
1958         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1959         
1960         if (!arrayMode.isOutOfBounds())
1961             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1962         
1963         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1964         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1965         
1966         inBounds.link(&m_jit);
1967     }
1968     
1969     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1970
1971     base.use();
1972     property.use();
1973     value.use();
1974     storage.use();
1975     
1976     if (arrayMode.isOutOfBounds()) {
1977         addSlowPathGenerator(
1978             slowPathCall(
1979                 slowCase, this,
1980                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1981                 NoResult, baseReg, propertyReg, valueReg));
1982     }
1983
1984     noResult(m_currentNode, UseChildrenCalledExplicitly);
1985 }
1986
1987 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1988 {
1989     SpeculateCellOperand string(this, node->child1());
1990     SpeculateStrictInt32Operand index(this, node->child2());
1991     StorageOperand storage(this, node->child3());
1992
1993     GPRReg stringReg = string.gpr();
1994     GPRReg indexReg = index.gpr();
1995     GPRReg storageReg = storage.gpr();
1996     
1997     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1998
1999     // unsigned comparison so we can filter out negative indices and indices that are too large
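    // (A negative int32 index reinterprets as a huge unsigned value, e.g. -1 becomes
    // 0xFFFFFFFF, so a single AboveOrEqual check against the length rejects both cases.)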
2000     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2001
2002     GPRTemporary scratch(this);
2003     GPRReg scratchReg = scratch.gpr();
2004
2005     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2006
2007     // Load the character into scratchReg
2008     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2009
2010     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2011     JITCompiler::Jump cont8Bit = m_jit.jump();
2012
2013     is16Bit.link(&m_jit);
2014
2015     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2016
2017     cont8Bit.link(&m_jit);
2018
2019     int32Result(scratchReg, m_currentNode);
2020 }
2021
2022 void SpeculativeJIT::compileGetByValOnString(Node* node)
2023 {
2024     SpeculateCellOperand base(this, node->child1());
2025     SpeculateStrictInt32Operand property(this, node->child2());
2026     StorageOperand storage(this, node->child3());
2027     GPRReg baseReg = base.gpr();
2028     GPRReg propertyReg = property.gpr();
2029     GPRReg storageReg = storage.gpr();
2030
2031     GPRTemporary scratch(this);
2032     GPRReg scratchReg = scratch.gpr();
2033 #if USE(JSVALUE32_64)
2034     GPRTemporary resultTag;
2035     GPRReg resultTagReg = InvalidGPRReg;
2036     if (node->arrayMode().isOutOfBounds()) {
2037         GPRTemporary realResultTag(this);
2038         resultTag.adopt(realResultTag);
2039         resultTagReg = resultTag.gpr();
2040     }
2041 #endif
2042
2043     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2044
2045     // unsigned comparison so we can filter out negative indices and indices that are too large
2046     JITCompiler::Jump outOfBounds = m_jit.branch32(
2047         MacroAssembler::AboveOrEqual, propertyReg,
2048         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2049     if (node->arrayMode().isInBounds())
2050         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2051
2052     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2053
2054     // Load the character into scratchReg
2055     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2056
2057     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2058     JITCompiler::Jump cont8Bit = m_jit.jump();
2059
2060     is16Bit.link(&m_jit);
2061
2062     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2063
2064     JITCompiler::Jump bigCharacter =
2065         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2066
2067     // 8 bit string values don't need the isASCII check.
2068     cont8Bit.link(&m_jit);
2069
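    // The character code indexes the VM's table of interned single-character strings: scale
    // it by the pointer size (4 or 8 bytes) and load the cached JSString*.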
2070     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2071     m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2072     m_jit.loadPtr(scratchReg, scratchReg);
2073
2074     addSlowPathGenerator(
2075         slowPathCall(
2076             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2077
2078     if (node->arrayMode().isOutOfBounds()) {
2079 #if USE(JSVALUE32_64)
2080         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2081 #endif
2082
2083         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2084         bool prototypeChainIsSane = false;
2085         if (globalObject->stringPrototypeChainIsSane()) {
2086             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2087             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2088             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2089             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2090             // indexed properties either.
2091             // https://bugs.webkit.org/show_bug.cgi?id=144668
2092             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2093             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2094             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2095         }
2096         if (prototypeChainIsSane) {
2097             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2098             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2099             
2100 #if USE(JSVALUE64)
2101             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2102                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2103 #else
2104             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2105                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2106                 baseReg, propertyReg));
2107 #endif
2108         } else {
2109 #if USE(JSVALUE64)
2110             addSlowPathGenerator(
2111                 slowPathCall(
2112                     outOfBounds, this, operationGetByValStringInt,
2113                     scratchReg, baseReg, propertyReg));
2114 #else
2115             addSlowPathGenerator(
2116                 slowPathCall(
2117                     outOfBounds, this, operationGetByValStringInt,
2118                     JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2119 #endif
2120         }
2121         
2122 #if USE(JSVALUE64)
2123         jsValueResult(scratchReg, m_currentNode);
2124 #else
2125         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2126 #endif
2127     } else
2128         cellResult(scratchReg, m_currentNode);
2129 }
2130
2131 void SpeculativeJIT::compileFromCharCode(Node* node)
2132 {
2133     Edge& child = node->child1();
2134     if (child.useKind() == UntypedUse) {
2135         JSValueOperand opr(this, child);
2136         JSValueRegs oprRegs = opr.jsValueRegs();
2137 #if USE(JSVALUE64)
2138         GPRTemporary result(this);
2139         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2140 #else
2141         GPRTemporary resultTag(this);
2142         GPRTemporary resultPayload(this);
2143         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2144 #endif
2145         flushRegisters();
2146         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2147         m_jit.exceptionCheck();
2148         
2149         jsValueResult(resultRegs, node);
2150         return;
2151     }
2152
2153     SpeculateStrictInt32Operand property(this, child);
2154     GPRReg propertyReg = property.gpr();
2155     GPRTemporary smallStrings(this);
2156     GPRTemporary scratch(this);
2157     GPRReg scratchReg = scratch.gpr();
2158     GPRReg smallStringsReg = smallStrings.gpr();
2159
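    // Fast path: look the character code up in the VM's single-character string cache.
    // Character codes outside the cached range, or cache entries that have not been
    // materialized yet (the loaded pointer is null), fall back to operationStringFromCharCode.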
2160     JITCompiler::JumpList slowCases;
2161     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2162     m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2163     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2164
2165     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2166     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2167     cellResult(scratchReg, m_currentNode);
2168 }
2169
2170 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2171 {
2172     VirtualRegister virtualRegister = node->virtualRegister();
2173     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2174
2175     switch (info.registerFormat()) {
2176     case DataFormatStorage:
2177         RELEASE_ASSERT_NOT_REACHED();
2178
2179     case DataFormatBoolean:
2180     case DataFormatCell:
2181         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2182         return GeneratedOperandTypeUnknown;
2183
2184     case DataFormatNone:
2185     case DataFormatJSCell:
2186     case DataFormatJS:
2187     case DataFormatJSBoolean:
2188     case DataFormatJSDouble:
2189         return GeneratedOperandJSValue;
2190
2191     case DataFormatJSInt32:
2192     case DataFormatInt32:
2193         return GeneratedOperandInteger;
2194
2195     default:
2196         RELEASE_ASSERT_NOT_REACHED();
2197         return GeneratedOperandTypeUnknown;
2198     }
2199 }
2200
2201 void SpeculativeJIT::compileValueToInt32(Node* node)
2202 {
2203     switch (node->child1().useKind()) {
2204 #if USE(JSVALUE64)
2205     case Int52RepUse: {
2206         SpeculateStrictInt52Operand op1(this, node->child1());
2207         GPRTemporary result(this, Reuse, op1);
2208         GPRReg op1GPR = op1.gpr();
2209         GPRReg resultGPR = result.gpr();
2210         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2211         int32Result(resultGPR, node, DataFormatInt32);
2212         return;
2213     }
2214 #endif // USE(JSVALUE64)
2215         
2216     case DoubleRepUse: {
2217         GPRTemporary result(this);
2218         SpeculateDoubleOperand op1(this, node->child1());
2219         FPRReg fpr = op1.fpr();
2220         GPRReg gpr = result.gpr();
2221         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2222         
2223         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this,
2224             hasSensibleDoubleToInt() ? operationToInt32SensibleSlow : operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2225         
2226         int32Result(gpr, node);
2227         return;
2228     }
2229     
2230     case NumberUse:
2231     case NotCellUse: {
2232         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2233         case GeneratedOperandInteger: {
2234             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2235             GPRTemporary result(this, Reuse, op1);
2236             m_jit.move(op1.gpr(), result.gpr());
2237             int32Result(result.gpr(), node, op1.format());
2238             return;
2239         }
2240         case GeneratedOperandJSValue: {
2241             GPRTemporary result(this);
2242 #if USE(JSVALUE64)
2243             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2244
2245             GPRReg gpr = op1.gpr();
2246             GPRReg resultGpr = result.gpr();
2247             FPRTemporary tempFpr(this);
2248             FPRReg fpr = tempFpr.fpr();
2249
2250             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2251             JITCompiler::JumpList converted;
2252
2253             if (node->child1().useKind() == NumberUse) {
2254                 DFG_TYPE_CHECK(
2255                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2256                     m_jit.branchTest64(
2257                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2258             } else {
2259                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2260                 
2261                 DFG_TYPE_CHECK(
2262                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2263                 
2264                 // It's not a cell: so true turns into 1 and all else turns into 0.
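                // (compare64 against ValueTrue yields 1 only for boolean true; false, null,
                // and undefined all yield 0, matching ToInt32 on those values.)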
2265                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2266                 converted.append(m_jit.jump());
2267                 
2268                 isNumber.link(&m_jit);
2269             }
2270
2271             // First, if we get here we have a double encoded as a JSValue
2272             unboxDouble(gpr, resultGpr, fpr);
2273
2274             silentSpillAllRegisters(resultGpr);
2275             callOperation(operationToInt32, resultGpr, fpr);
2276             silentFillAllRegisters(resultGpr);
2277
2278             converted.append(m_jit.jump());
2279
2280             isInteger.link(&m_jit);
2281             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2282
2283             converted.link(&m_jit);
2284 #else
2285             Node* childNode = node->child1().node();
2286             VirtualRegister virtualRegister = childNode->virtualRegister();
2287             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2288
2289             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2290
2291             GPRReg payloadGPR = op1.payloadGPR();
2292             GPRReg resultGpr = result.gpr();
2293         
2294             JITCompiler::JumpList converted;
2295
2296             if (info.registerFormat() == DataFormatJSInt32)
2297                 m_jit.move(payloadGPR, resultGpr);
2298             else {
2299                 GPRReg tagGPR = op1.tagGPR();
2300                 FPRTemporary tempFpr(this);
2301                 FPRReg fpr = tempFpr.fpr();
2302                 FPRTemporary scratch(this);
2303
2304                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2305
2306                 if (node->child1().useKind() == NumberUse) {
2307                     DFG_TYPE_CHECK(
2308                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2309                         m_jit.branch32(
2310                             MacroAssembler::AboveOrEqual, tagGPR,
2311                             TrustedImm32(JSValue::LowestTag)));
2312                 } else {
2313                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2314                     
2315                     DFG_TYPE_CHECK(
2316                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2317                         m_jit.branchIfCell(op1.jsValueRegs()));
2318                     
2319                     // It's not a cell: so true turns into 1 and all else turns into 0.
2320                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2321                     m_jit.move(TrustedImm32(0), resultGpr);
2322                     converted.append(m_jit.jump());
2323                     
2324                     isBoolean.link(&m_jit);
2325                     m_jit.move(payloadGPR, resultGpr);
2326                     converted.append(m_jit.jump());
2327                     
2328                     isNumber.link(&m_jit);
2329                 }
2330
2331                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2332
2333                 silentSpillAllRegisters(resultGpr);
2334                 callOperation(operationToInt32, resultGpr, fpr);
2335                 silentFillAllRegisters(resultGpr);
2336
2337                 converted.append(m_jit.jump());
2338
2339                 isInteger.link(&m_jit);
2340                 m_jit.move(payloadGPR, resultGpr);
2341
2342                 converted.link(&m_jit);
2343             }
2344 #endif
2345             int32Result(resultGpr, node);
2346             return;
2347         }
2348         case GeneratedOperandTypeUnknown:
2349             RELEASE_ASSERT(!m_compileOkay);
2350             return;
2351         }
2352         RELEASE_ASSERT_NOT_REACHED();
2353         return;
2354     }
2355     
2356     default:
2357         ASSERT(!m_compileOkay);
2358         return;
2359     }
2360 }
2361
2362 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2363 {
2364     if (doesOverflow(node->arithMode())) {
2365         if (enableInt52()) {
2366             SpeculateInt32Operand op1(this, node->child1());
2367             GPRTemporary result(this, Reuse, op1);
2368             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2369             strictInt52Result(result.gpr(), node);
2370             return;
2371         }
2372         SpeculateInt32Operand op1(this, node->child1());
2373         FPRTemporary result(this);
2374             
2375         GPRReg inputGPR = op1.gpr();
2376         FPRReg outputFPR = result.fpr();
2377             
2378         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2379             
2380         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2381         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2382         positive.link(&m_jit);
2383             
2384         doubleResult(outputFPR, node);
2385         return;
2386     }
2387     
2388     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2389
2390     SpeculateInt32Operand op1(this, node->child1());
2391     GPRTemporary result(this);
2392
2393     m_jit.move(op1.gpr(), result.gpr());
2394
2395     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2396
2397     int32Result(result.gpr(), node, op1.format());
2398 }
2399
2400 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2401 {
2402     SpeculateDoubleOperand op1(this, node->child1());
2403     FPRTemporary scratch(this);
2404     GPRTemporary result(this);
2405     
2406     FPRReg valueFPR = op1.fpr();
2407     FPRReg scratchFPR = scratch.fpr();
2408     GPRReg resultGPR = result.gpr();
2409
2410     JITCompiler::JumpList failureCases;
2411     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2412     m_jit.branchConvertDoubleToInt32(
2413         valueFPR, resultGPR, failureCases, scratchFPR,
2414         shouldCheckNegativeZero(node->arithMode()));
2415     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2416
2417     int32Result(resultGPR, node);
2418 }
2419
2420 void SpeculativeJIT::compileDoubleRep(Node* node)
2421 {
2422     switch (node->child1().useKind()) {
2423     case RealNumberUse: {
2424         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2425         FPRTemporary result(this);
2426         
2427         JSValueRegs op1Regs = op1.jsValueRegs();
2428         FPRReg resultFPR = result.fpr();
2429         
2430 #if USE(JSVALUE64)
2431         GPRTemporary temp(this);
2432         GPRReg tempGPR = temp.gpr();
2433         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2434 #else
2435         FPRTemporary temp(this);
2436         FPRReg tempFPR = temp.fpr();
2437         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2438 #endif
2439         
2440         JITCompiler::Jump done = m_jit.branchDouble(
2441             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2442         
2443         DFG_TYPE_CHECK(
2444             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2445         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2446         
2447         done.link(&m_jit);
2448         
2449         doubleResult(resultFPR, node);
2450         return;
2451     }
2452     
2453     case NotCellUse:
2454     case NumberUse: {
2455         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2456
2457         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2458         if (isInt32Speculation(possibleTypes)) {
2459             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2460             FPRTemporary result(this);
2461             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2462             doubleResult(result.fpr(), node);
2463             return;
2464         }
2465
2466         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2467         FPRTemporary result(this);
2468
2469 #if USE(JSVALUE64)
2470         GPRTemporary temp(this);
2471
2472         GPRReg op1GPR = op1.gpr();
2473         GPRReg tempGPR = temp.gpr();
2474         FPRReg resultFPR = result.fpr();
2475         JITCompiler::JumpList done;
2476
2477         JITCompiler::Jump isInteger = m_jit.branch64(
2478             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2479
2480         if (node->child1().useKind() == NotCellUse) {
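            // Handle the remaining non-cell, non-number inputs inline, mirroring ToNumber:
            // null and false become 0, true becomes 1, and undefined becomes NaN.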
2481             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2482             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2483
2484             static const double zero = 0;
2485             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2486
2487             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2488             done.append(isNull);
2489
2490             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2491                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2492
2493             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2494             static const double one = 1;
2495             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2496             done.append(m_jit.jump());
2497             done.append(isFalse);
2498
2499             isUndefined.link(&m_jit);
2500             static const double NaN = PNaN;
2501             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2502             done.append(m_jit.jump());
2503
2504             isNumber.link(&m_jit);
2505         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2506             typeCheck(
2507                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2508                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2509         }
2510
2511         unboxDouble(op1GPR, tempGPR, resultFPR);
2512         done.append(m_jit.jump());
2513     
2514         isInteger.link(&m_jit);
2515         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2516         done.link(&m_jit);
2517 #else // USE(JSVALUE64) -> this is the 32_64 case
2518         FPRTemporary temp(this);
2519     
2520         GPRReg op1TagGPR = op1.tagGPR();
2521         GPRReg op1PayloadGPR = op1.payloadGPR();
2522         FPRReg tempFPR = temp.fpr();
2523         FPRReg resultFPR = result.fpr();
2524         JITCompiler::JumpList done;
2525     
2526         JITCompiler::Jump isInteger = m_jit.branch32(
2527             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2528
2529         if (node->child1().useKind() == NotCellUse) {
2530             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2531             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2532
2533             static const double zero = 0;
2534             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2535
2536             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2537             done.append(isNull);
2538
2539             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2540
2541             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2542             static const double one = 1;
2543             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2544             done.append(m_jit.jump());
2545             done.append(isFalse);
2546
2547             isUndefined.link(&m_jit);
2548             static const double NaN = PNaN;
2549             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2550             done.append(m_jit.jump());
2551
2552             isNumber.link(&m_jit);
2553         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2554             typeCheck(
2555                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2556                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2557         }
2558
2559         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2560         done.append(m_jit.jump());
2561     
2562         isInteger.link(&m_jit);
2563         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2564         done.link(&m_jit);
2565 #endif // USE(JSVALUE64)
2566     
2567         doubleResult(resultFPR, node);
2568         return;
2569     }
2570         
2571 #if USE(JSVALUE64)
2572     case Int52RepUse: {
2573         SpeculateStrictInt52Operand value(this, node->child1());
2574         FPRTemporary result(this);
2575         
2576         GPRReg valueGPR = value.gpr();
2577         FPRReg resultFPR = result.fpr();
2578
2579         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2580         
2581         doubleResult(resultFPR, node);
2582         return;
2583     }
2584 #endif // USE(JSVALUE64)
2585         
2586     default:
2587         RELEASE_ASSERT_NOT_REACHED();
2588         return;
2589     }
2590 }
2591
2592 void SpeculativeJIT::compileValueRep(Node* node)
2593 {
2594     switch (node->child1().useKind()) {
2595     case DoubleRepUse: {
2596         SpeculateDoubleOperand value(this, node->child1());
2597         JSValueRegsTemporary result(this);
2598         
2599         FPRReg valueFPR = value.fpr();
2600         JSValueRegs resultRegs = result.regs();
2601         
2602         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2603         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2604         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2605         // local was purified.
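        // (An "impure" NaN is one whose bit pattern could collide with the NaN-boxed value
        // encoding; purifyNaN rewrites it to the canonical quiet NaN so boxing stays sound.)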
2606         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2607             m_jit.purifyNaN(valueFPR);
2608
2609         boxDouble(valueFPR, resultRegs);
2610         
2611         jsValueResult(resultRegs, node);
2612         return;
2613     }
2614         
2615 #if USE(JSVALUE64)
2616     case Int52RepUse: {
2617         SpeculateStrictInt52Operand value(this, node->child1());
2618         GPRTemporary result(this);
2619         
2620         GPRReg valueGPR = value.gpr();
2621         GPRReg resultGPR = result.gpr();
2622         
2623         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2624         
2625         jsValueResult(resultGPR, node);
2626         return;
2627     }
2628 #endif // USE(JSVALUE64)
2629         
2630     default:
2631         RELEASE_ASSERT_NOT_REACHED();
2632         return;
2633     }
2634 }
2635
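// Clamp a double to the 0..255 byte range used by clamped typed array stores. The caller
// truncates the result with toInt32(), so adding 0.5 first gives round-half-up behaviour for
// in-range values (e.g. 126.4 -> 126, 126.5 -> 127); NaN and negative inputs end up clamped
// to 0, and anything above 255 is clamped to 255.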
2636 static double clampDoubleToByte(double d)
2637 {
2638     d += 0.5;
2639     if (!(d > 0))
2640         d = 0;
2641     else if (d > 255)
2642         d = 255;
2643     return d;
2644 }
2645
2646 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2647 {
2648     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2649     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2650     jit.xorPtr(result, result);
2651     MacroAssembler::Jump clamped = jit.jump();
2652     tooBig.link(&jit);
2653     jit.move(JITCompiler::TrustedImm32(255), result);
2654     clamped.link(&jit);
2655     inBounds.link(&jit);
2656 }
2657
2658 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2659 {
2660     // Unordered compare so we pick up NaN
2661     static const double zero = 0;
2662     static const double byteMax = 255;
2663     static const double half = 0.5;
2664     jit.loadDouble(JITCompiler::TrustedImmPtr(&zero), scratch);
2665     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2666     jit.loadDouble(JITCompiler::TrustedImmPtr(&byteMax), scratch);
2667     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2668     
2669     jit.loadDouble(JITCompiler::TrustedImmPtr(&half), scratch);
2670     // FIXME: This should probably just use a floating point round!
2671     // https://bugs.webkit.org/show_bug.cgi?id=72054
2672     jit.addDouble(source, scratch);
2673     jit.truncateDoubleToInt32(scratch, result);   
2674     MacroAssembler::Jump truncatedInt = jit.jump();
2675     
2676     tooSmall.link(&jit);
2677     jit.xorPtr(result, result);
2678     MacroAssembler::Jump zeroed = jit.jump();
2679     
2680     tooBig.link(&jit);
2681     jit.move(JITCompiler::TrustedImm32(255), result);
2682     
2683     truncatedInt.link(&jit);
2684     zeroed.link(&jit);
2685
2686 }
2687
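// Returns an unset Jump when the access is statically known to be in bounds (PutByValAlias,
// or a constant index into a view whose length the graph can fold); otherwise emits an
// unsigned bounds check against the view's length.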
2688 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2689 {
2690     if (node->op() == PutByValAlias)
2691         return JITCompiler::Jump();
2692     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2693         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2694     if (view) {
2695         uint32_t length = view->length();
2696         Node* indexNode = m_jit.graph().child(node, 1).node();
2697         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2698             return JITCompiler::Jump();
2699         return m_jit.branch32(
2700             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2701     }
2702     return m_jit.branch32(
2703         MacroAssembler::AboveOrEqual, indexGPR,
2704         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2705 }
2706
2707 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2708 {
2709     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2710     if (!jump.isSet())
2711         return;
2712     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2713 }
2714
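// For out-of-bounds accesses on wasteful (ArrayBuffer-backed) views, a null vector means the
// backing buffer has been detached (neutered), which is treated as a speculation failure
// rather than a silent no-op.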
2715 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2716 {
2717     JITCompiler::Jump done;
2718     if (outOfBounds.isSet()) {
2719         done = m_jit.jump();
2720         if (node->arrayMode().isInBounds())
2721             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2722         else {
2723             outOfBounds.link(&m_jit);
2724
2725             JITCompiler::Jump notWasteful = m_jit.branch32(
2726                 MacroAssembler::NotEqual,
2727                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2728                 TrustedImm32(WastefulTypedArray));
2729
2730             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2731                 MacroAssembler::Zero,
2732                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2733             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2734             notWasteful.link(&m_jit);
2735         }
2736     }
2737     return done;
2738 }
2739
2740 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2741 {
2742     ASSERT(isInt(type));
2743     
2744     SpeculateCellOperand base(this, node->child1());
2745     SpeculateStrictInt32Operand property(this, node->child2());
2746     StorageOperand storage(this, node->child3());
2747
2748     GPRReg baseReg = base.gpr();
2749     GPRReg propertyReg = property.gpr();
2750     GPRReg storageReg = storage.gpr();
2751
2752     GPRTemporary result(this);
2753     GPRReg resultReg = result.gpr();
2754
2755     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2756
2757     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2758     switch (elementSize(type)) {
2759     case 1:
2760         if (isSigned(type))
2761             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2762         else
2763             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2764         break;
2765     case 2:
2766         if (isSigned(type))
2767             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2768         else
2769             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2770         break;
2771     case 4:
2772         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2773         break;
2774     default:
2775         CRASH();
2776     }
2777     if (elementSize(type) < 4 || isSigned(type)) {
2778         int32Result(resultReg, node);
2779         return;
2780     }
2781     
2782     ASSERT(elementSize(type) == 4 && !isSigned(type));
2783     if (node->shouldSpeculateInt32()) {
2784         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2785         int32Result(resultReg, node);
2786         return;
2787     }
2788     
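    // At this point resultReg holds a Uint32 element that may not fit in an Int32: the raw
    // 32 bits, read as a signed integer, could be negative. On 64-bit we can represent it
    // exactly as Int52 by zero-extending. Otherwise we produce a double: convert the signed
    // reading and, if it was negative, add 2^32 to recover the unsigned value (e.g. the bit
    // pattern 0xFFFFFFFF converts to -1, and -1 + 2^32 = 4294967295).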
2789 #if USE(JSVALUE64)
2790     if (node->shouldSpeculateAnyInt()) {
2791         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2792         strictInt52Result(resultReg, node);
2793         return;
2794     }
2795 #endif
2796     
2797     FPRTemporary fresult(this);
2798     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2799     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2800     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2801     positive.link(&m_jit);
2802     doubleResult(fresult.fpr(), node);
2803 }
2804
2805 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2806 {
2807     ASSERT(isInt(type));
2808     
2809     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2810     GPRReg storageReg = storage.gpr();
2811     
2812     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2813     
2814     GPRTemporary value;
2815 #if USE(JSVALUE32_64)
2816     GPRTemporary propertyTag;
2817     GPRTemporary valueTag;
2818 #endif
2819
2820     GPRReg valueGPR = InvalidGPRReg;
2821 #if USE(JSVALUE32_64)
2822     GPRReg propertyTagGPR = InvalidGPRReg;
2823     GPRReg valueTagGPR = InvalidGPRReg;
2824 #endif
2825
2826     JITCompiler::JumpList slowPathCases;
2827     
2828     bool isAppropriateConstant = false;
2829     if (valueUse->isConstant()) {
2830         JSValue jsValue = valueUse->asJSValue();
2831         SpeculatedType expectedType = typeFilterFor(valueUse.useKind());
2832         SpeculatedType actualType = speculationFromValue(jsValue);
2833         isAppropriateConstant = (expectedType | actualType) == expectedType;
2834     }
2835
2836     if (isAppropriateConstant) {
2837         JSValue jsValue = valueUse->asJSValue();
2838         if (!jsValue.isNumber()) {
2839             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2840             noResult(node);
2841             return;
2842         }
2843         double d = jsValue.asNumber();
2844         if (isClamped(type)) {
2845             ASSERT(elementSize(type) == 1);
2846             d = clampDoubleToByte(d);
2847         }
2848         GPRTemporary scratch(this);
2849         GPRReg scratchReg = scratch.gpr();
2850         m_jit.move(Imm32(toInt32(d)), scratchReg);
2851         value.adopt(scratch);
2852         valueGPR = scratchReg;
2853     } else {
2854         switch (valueUse.useKind()) {
2855         case Int32Use: {
2856             SpeculateInt32Operand valueOp(this, valueUse);
2857             GPRTemporary scratch(this);
2858             GPRReg scratchReg = scratch.gpr();
2859             m_jit.move(valueOp.gpr(), scratchReg);
2860             if (isClamped(type)) {
2861                 ASSERT(elementSize(type) == 1);
2862                 compileClampIntegerToByte(m_jit, scratchReg);
2863             }
2864             value.adopt(scratch);
2865             valueGPR = scratchReg;
2866             break;
2867         }
2868             
2869 #if USE(JSVALUE64)
2870         case Int52RepUse: {
2871             SpeculateStrictInt52Operand valueOp(this, valueUse);
2872             GPRTemporary scratch(this);
2873             GPRReg scratchReg = scratch.gpr();
2874             m_jit.move(valueOp.gpr(), scratchReg);
2875             if (isClamped(type)) {
2876                 ASSERT(elementSize(type) == 1);
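                // Clamp the Int52 value to [0, 255] for the clamped (Uint8Clamped) case.
                // The unsigned BelowOrEqual test accepts 0..255 directly (negative values
                // look huge when viewed unsigned, so they fall through); the signed
                // GreaterThan test then separates values above 255, which clamp to 255,
                // from negative values, which clamp to 0.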
2877                 MacroAssembler::Jump inBounds = m_jit.branch64(
2878                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2879                 MacroAssembler::Jump tooBig = m_jit.branch64(
2880                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2881                 m_jit.move(TrustedImm32(0), scratchReg);
2882                 MacroAssembler::Jump clamped = m_jit.jump();
2883                 tooBig.link(&m_jit);
2884                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2885                 clamped.link(&m_jit);
2886                 inBounds.link(&m_jit);
2887             }
2888             value.adopt(scratch);
2889             valueGPR = scratchReg;
2890             break;
2891         }
2892 #endif // USE(JSVALUE64)
2893             
2894         case DoubleRepUse: {
2895             if (isClamped(type)) {
2896                 ASSERT(elementSize(type) == 1);
2897                 SpeculateDoubleOperand valueOp(this, valueUse);
2898                 GPRTemporary result(this);
2899                 FPRTemporary floatScratch(this);
2900                 FPRReg fpr = valueOp.fpr();
2901                 GPRReg gpr = result.gpr();
2902                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2903                 value.adopt(result);
2904                 valueGPR = gpr;
2905             } else {
2906 #if USE(JSVALUE32_64)
2907                 GPRTemporary realPropertyTag(this);
2908                 propertyTag.adopt(realPropertyTag);
2909                 propertyTagGPR = propertyTag.gpr();
2910
2911                 GPRTemporary realValueTag(this);
2912                 valueTag.adopt(realValueTag);
2913                 valueTagGPR = valueTag.gpr();
2914 #endif
2915                 SpeculateDoubleOperand valueOp(this, valueUse);
2916                 GPRTemporary result(this);
2917                 FPRReg fpr = valueOp.fpr();
2918                 GPRReg gpr = result.gpr();
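                // Store the double as an integer when that is cheap: NaN stores as 0, and a
                // double in int32 range is truncated toward zero and stored directly. Values
                // outside int32 range are boxed (along with the property index) and handed
                // to the slow path below, which performs the full conversion.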
2919                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2920                 m_jit.xorPtr(gpr, gpr);
2921                 MacroAssembler::JumpList fixed(m_jit.jump());
2922                 notNaN.link(&m_jit);
2923
2924                 fixed.append(m_jit.branchTruncateDoubleToInt32(
2925                     fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
2926
2927 #if USE(JSVALUE64)
2928                 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
2929                 boxDouble(fpr, gpr);
2930 #else
2931                 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
2932                 boxDouble(fpr, valueTagGPR, gpr);
2933 #endif
2934                 slowPathCases.append(m_jit.jump());
2935
2936                 fixed.link(&m_jit);
2937                 value.adopt(result);
2938                 valueGPR = gpr;
2939             }
2940             break;
2941         }
2942             
2943         default:
2944             RELEASE_ASSERT_NOT_REACHED();
2945             break;
2946         }
2947     }
2948     
2949     ASSERT_UNUSED(valueGPR, valueGPR != property);
2950     ASSERT(valueGPR != base);
2951     ASSERT(valueGPR != storageReg);
2952     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
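    // The store below is only reached when the index is in bounds; the out-of-bounds jump is
    // resolved afterwards by jumpForTypedArrayIsNeuteredIfOutOfBounds, which either OSR exits
    // (for in-bounds speculation) or treats the store as a no-op unless the view has been
    // neutered.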
2953
2954     switch (elementSize(type)) {
2955     case 1:
2956         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2957         break;
2958     case 2:
2959         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2960         break;
2961     case 4:
2962         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2963         break;
2964     default:
2965         CRASH();
2966     }
2967
2968     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2969     if (done.isSet())
2970         done.link(&m_jit);
2971
2972     if (!slowPathCases.empty()) {
2973 #if USE(JSVALUE64)
2974         if (node->op() == PutByValDirect) {
2975             addSlowPathGenerator(slowPathCall(
2976                 slowPathCases, this,
2977                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
2978                 NoResult, base, property, valueGPR));
2979         } else {
2980             addSlowPathGenerator(slowPathCall(
2981                 slowPathCases, this,
2982                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
2983                 NoResult, base, property, valueGPR));
2984         }
2985 #else // not USE(JSVALUE64)
2986         if (node->op() == PutByValDirect) {
2987             addSlowPathGenerator(slowPathCall(
2988                 slowPathCases, this,
2989                 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
2990                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
2991         } else {
2992             addSlowPathGenerator(slowPathCall(
2993                 slowPathCases, this,
2994                 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
2995                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
2996         }
2997 #endif
2998     }
2999     noResult(node);
3000 }
3001
3002 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3003 {
3004     ASSERT(isFloat(type));
3005     
3006     SpeculateCellOperand base(this, node->child1());
3007     SpeculateStrictInt32Operand property(this, node->child2());
3008     StorageOperand storage(this, node->child3());
3009
3010     GPRReg baseReg = base.gpr();
3011     GPRReg propertyReg = property.gpr();
3012     GPRReg storageReg = storage.gpr();
3013
3014     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
3015
3016     FPRTemporary result(this);
3017     FPRReg resultReg = result.fpr();
3018     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3019     switch (elementSize(type)) {
3020     case 4:
3021         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3022         m_jit.convertFloatToDouble(resultReg, resultReg);
3023         break;
3024     case 8: {
3025         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3026         break;
3027     }
3028     default:
3029         RELEASE_ASSERT_NOT_REACHED();
3030     }
3031     
3032     doubleResult(resultReg, node);
3033 }
3034
3035 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3036 {
3037     ASSERT(isFloat(type));
3038     
3039     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3040     GPRReg storageReg = storage.gpr();
3041     
3042     Edge baseUse = m_jit.graph().varArgChild(node, 0);
3043     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3044
3045     SpeculateDoubleOperand valueOp(this, valueUse);
3046     FPRTemporary scratch(this);
3047     FPRReg valueFPR = valueOp.fpr();
3048     FPRReg scratchFPR = scratch.fpr();
3049
3050     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3051     
3052     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3053     
3054     switch (elementSize(type)) {
3055     case 4: {
3056         m_jit.moveDouble(valueFPR, scratchFPR);
3057         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3058         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3059         break;
3060     }
3061     case 8:
3062         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3063         break;
3064     default:
3065         RELEASE_ASSERT_NOT_REACHED();
3066     }
3067
3068     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3069     if (done.isSet())
3070         done.link(&m_jit);
3071     noResult(node);
3072 }
3073
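// Emits the common case of `value instanceof C` once C's prototype is in prototypeReg:
// walk value's prototype chain, comparing each prototype against prototypeReg (e.g.
// `[] instanceof Object` walks Array.prototype and then Object.prototype, where it
// matches). Proxy objects bail out to operationDefaultHasInstance, since their prototype
// is observable through a trap. The result is left in scratchReg as a boxed boolean.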
3074 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
3075 {
3076     // Check that prototype is an object.
3077     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3078     
3079     // Initialize scratchReg with the value being checked.
3080     m_jit.move(valueReg, scratchReg);
3081     
3082     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3083     MacroAssembler::Label loop(&m_jit);
3084     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3085         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3086     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
3087     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
3088     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3089 #if USE(JSVALUE64)
3090     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3091 #else
3092     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3093 #endif
3094     
3095     // No match - result is false.
3096 #if USE(JSVALUE64)
3097     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
3098 #else
3099     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
3100 #endif
3101     MacroAssembler::JumpList doneJumps; 
3102     doneJumps.append(m_jit.jump());
3103
3104     performDefaultHasInstance.link(&m_jit);
3105     silentSpillAllRegisters(scratchReg);
3106     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
3107     silentFillAllRegisters(scratchReg);
3108     m_jit.exceptionCheck();
3109 #if USE(JSVALUE64)
3110     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
3111 #endif
3112     doneJumps.append(m_jit.jump());
3113     
3114     isInstance.link(&m_jit);
3115 #if USE(JSVALUE64)
3116     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
3117 #else
3118     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
3119 #endif
3120     
3121     doneJumps.link(&m_jit);
3122 }
3123
3124 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
3125 {
3126     SpeculateCellOperand base(this, node->child1());
3127
3128     GPRReg baseGPR = base.gpr();
3129
3130     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
3131
3132     noResult(node);
3133 }
3134
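// parseInt() has four code paths, selected by whether an explicit radix operand (child2)
// is present and whether the input is known to be a string: with a radix we call
// operationParseIntString or operationParseIntGeneric, and without one we call
// operationParseIntStringNoRadix or operationParseIntNoRadixGeneric. Every path is a
// C call followed by an exception check.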
3135 void SpeculativeJIT::compileParseInt(Node* node)
3136 {
3137     RELEASE_ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == StringUse);
3138
3139     GPRFlushedCallResult resultPayload(this);
3140     GPRReg resultPayloadGPR = resultPayload.gpr();
3141 #if USE(JSVALUE64)
3142     JSValueRegs resultRegs(resultPayloadGPR);
3143 #else
3144     GPRFlushedCallResult2 resultTag(this);
3145     GPRReg resultTagGPR = resultTag.gpr();
3146     JSValueRegs resultRegs(resultTagGPR, resultPayloadGPR);
3147 #endif
3148
3149     if (node->child2()) {
3150         SpeculateInt32Operand radix(this, node->child2());
3151         GPRReg radixGPR = radix.gpr();
3152         if (node->child1().useKind() == UntypedUse) {
3153             JSValueOperand value(this, node->child1());
3154
3155             flushRegisters();
3156 #if USE(JSVALUE64)
3157             callOperation(operationParseIntGeneric, resultRegs.gpr(), value.gpr(), radixGPR);
3158 #else
3159             callOperation(operationParseIntGeneric, resultRegs, value.jsValueRegs(), radixGPR);
3160 #endif
3161             m_jit.exceptionCheck();
3162         } else {
3163             SpeculateCellOperand value(this, node->child1());
3164             GPRReg valueGPR = value.gpr();
3165             speculateString(node->child1(), valueGPR);
3166
3167             flushRegisters();
3168 #if USE(JSVALUE64)
3169             callOperation(operationParseIntString, resultRegs.gpr(), valueGPR, radixGPR);
3170 #else
3171             callOperation(operationParseIntString, resultRegs, valueGPR, radixGPR);
3172 #endif
3173             m_jit.exceptionCheck();
3174         }
3175     } else {
3176         if (node->child1().useKind() == UntypedUse) {
3177             JSValueOperand value(this, node->child1());
3178
3179             flushRegisters();
3180 #if USE(JSVALUE64)
3181             callOperation(operationParseIntNoRadixGeneric, resultRegs.gpr(), value.jsValueRegs());
3182 #else
3183             callOperation(operationParseIntNoRadixGeneric, resultRegs, value.jsValueRegs());
3184 #endif
3185             m_jit.exceptionCheck();
3186         } else {
3187             SpeculateCellOperand value(this, node->child1());
3188             GPRReg valueGPR = value.gpr();
3189             speculateString(node->child1(), valueGPR);
3190
3191             flushRegisters();
3192             callOperation(operationParseIntStringNoRadix, resultRegs, valueGPR);
3193             m_jit.exceptionCheck();
3194         }
3195     }
3196
3197     jsValueResult(resultRegs, node);
3198 }
3199
3200 void SpeculativeJIT::compileInstanceOf(Node* node)
3201 {
3202     if (node->child1().useKind() == UntypedUse) {
3203         // The value might not be a cell, so speculate less aggressively. It might also
3204         // only be used once (i.e. by us), in which case we get zero benefit from
3205         // speculating any more aggressively than we absolutely need to.
3206         
3207         JSValueOperand value(this, node->child1());
3208         SpeculateCellOperand prototype(this, node->child2());
3209         GPRTemporary scratch(this);
3210         GPRTemporary scratch2(this);
3211         
3212         GPRReg prototypeReg = prototype.gpr();
3213         GPRReg scratchReg = scratch.gpr();
3214         GPRReg scratch2Reg = scratch2.gpr();
3215         
3216         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
3217         GPRReg valueReg = value.jsValueRegs().payloadGPR();
3218         moveFalseTo(scratchReg);
3219
3220         MacroAssembler::Jump done = m_jit.jump();
3221         
3222         isCell.link(&m_jit);
3223         
3224         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3225         
3226         done.link(&m_jit);
3227
3228         blessedBooleanResult(scratchReg, node);
3229         return;
3230     }
3231     
3232     SpeculateCellOperand value(this, node->child1());
3233     SpeculateCellOperand prototype(this, node->child2());
3234     
3235     GPRTemporary scratch(this);
3236     GPRTemporary scratch2(this);
3237     
3238     GPRReg valueReg = value.gpr();
3239     GPRReg prototypeReg = prototype.gpr();
3240     GPRReg scratchReg = scratch.gpr();
3241     GPRReg scratch2Reg = scratch2.gpr();
3242     
3243     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3244
3245     blessedBooleanResult(scratchReg, node);
3246 }
3247
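// Shared path for the untyped (JSValue) flavors of the bitwise ops. If either operand is
// statically known not to be a number we go straight to the slow-path C call. Otherwise we
// emit the snippet generator's int32 fast path and reach the C call only from its
// slow-path jump list, first materializing any constant operand into the result registers
// (the generator supports at most one constant operand).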
3248 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3249 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3250 {
3251     Edge& leftChild = node->child1();
3252     Edge& rightChild = node->child2();
3253
3254     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3255         JSValueOperand left(this, leftChild);
3256         JSValueOperand right(this, rightChild);
3257         JSValueRegs leftRegs = left.jsValueRegs();
3258         JSValueRegs rightRegs = right.jsValueRegs();
3259 #if USE(JSVALUE64)
3260         GPRTemporary result(this);
3261         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3262 #else
3263         GPRTemporary resultTag(this);
3264         GPRTemporary resultPayload(this);
3265         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3266 #endif
3267         flushRegisters();
3268         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3269         m_jit.exceptionCheck();
3270
3271         jsValueResult(resultRegs, node);
3272         return;
3273     }
3274
3275     std::optional<JSValueOperand> left;
3276     std::optional<JSValueOperand> right;
3277
3278     JSValueRegs leftRegs;
3279     JSValueRegs rightRegs;
3280
3281 #if USE(JSVALUE64)
3282     GPRTemporary result(this);
3283     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3284     GPRTemporary scratch(this);
3285     GPRReg scratchGPR = scratch.gpr();
3286 #else
3287     GPRTemporary resultTag(this);
3288     GPRTemporary resultPayload(this);
3289     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3290     GPRReg scratchGPR = resultTag.gpr();
3291 #endif
3292
3293     SnippetOperand leftOperand;
3294     SnippetOperand rightOperand;
3295
3296     // The snippet generator does not support both operands being constant. If the left
3297     // operand is already const, we'll ignore the right operand's constness.
3298     if (leftChild->isInt32Constant())
3299         leftOperand.setConstInt32(leftChild->asInt32());
3300     else if (rightChild->isInt32Constant())
3301         rightOperand.setConstInt32(rightChild->asInt32());
3302
3303     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3304
3305     if (!leftOperand.isConst()) {
3306         left.emplace(this, leftChild);
3307         leftRegs = left->jsValueRegs();
3308     }
3309     if (!rightOperand.isConst()) {
3310         right.emplace(this, rightChild);
3311         rightRegs = right->jsValueRegs();
3312     }
3313
3314     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3315     gen.generateFastPath(m_jit);
3316
3317     ASSERT(gen.didEmitFastPath());
3318     gen.endJumpList().append(m_jit.jump());
3319
3320     gen.slowPathJumpList().link(&m_jit);
3321     silentSpillAllRegisters(resultRegs);
3322
3323     if (leftOperand.isConst()) {
3324         leftRegs = resultRegs;
3325         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3326     } else if (rightOperand.isConst()) {
3327         rightRegs = resultRegs;
3328         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3329     }
3330
3331     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3332
3333     silentFillAllRegisters(resultRegs);
3334     m_jit.exceptionCheck();
3335
3336     gen.endJumpList().link(&m_jit);
3337     jsValueResult(resultRegs, node);
3338 }
3339
3340 void SpeculativeJIT::compileBitwiseOp(Node* node)
3341 {
3342     NodeType op = node->op();
3343     Edge& leftChild = node->child1();
3344     Edge& rightChild = node->child2();
3345
3346     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3347         switch (op) {
3348         case BitAnd:
3349             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3350             return;
3351         case BitOr:
3352             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3353             return;
3354         case BitXor:
3355             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3356             return;
3357         default:
3358             RELEASE_ASSERT_NOT_REACHED();
3359         }
3360     }
3361
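    // The constant can come from either child because and/or/xor are commutative; bitOp()
    // handles the immediate form. Shifts are not commutative and are compiled in
    // compileShiftOp() instead.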
3362     if (leftChild->isInt32Constant()) {
3363         SpeculateInt32Operand op2(this, rightChild);
3364         GPRTemporary result(this, Reuse, op2);
3365
3366         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3367
3368         int32Result(result.gpr(), node);
3369
3370     } else if (rightChild->isInt32Constant()) {
3371         SpeculateInt32Operand op1(this, leftChild);
3372         GPRTemporary result(this, Reuse, op1);
3373
3374         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3375
3376         int32Result(result.gpr(), node);
3377
3378     } else {
3379         SpeculateInt32Operand op1(this, leftChild);
3380         SpeculateInt32Operand op2(this, rightChild);
3381         GPRTemporary result(this, Reuse, op1, op2);
3382         
3383         GPRReg reg1 = op1.gpr();
3384         GPRReg reg2 = op2.gpr();
3385         bitOp(op, reg1, reg2, result.gpr());
3386         
3387         int32Result(result.gpr(), node);
3388     }
3389 }
3390
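// Untyped right shift: BitRShift is the signed JS `>>` and BitURShift is the unsigned
// `>>>` (e.g. (-8) >> 1 === -4 while (-8) >>> 1 === 0x7FFFFFFC). The structure mirrors
// emitUntypedBitOp, except that JITRightShiftGenerator also takes FPR scratch registers.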
3391 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3392 {
3393     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3394         ? operationValueBitRShift : operationValueBitURShift;
3395     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3396         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3397
3398     Edge& leftChild = node->child1();
3399     Edge& rightChild = node->child2();
3400
3401     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3402         JSValueOperand left(this, leftChild);
3403         JSValueOperand right(this, rightChild);
3404         JSValueRegs leftRegs = left.jsValueRegs();
3405         JSValueRegs rightRegs = right.jsValueRegs();
3406 #if USE(JSVALUE64)
3407         GPRTemporary result(this);
3408         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3409 #else
3410         GPRTemporary resultTag(this);
3411         GPRTemporary resultPayload(this);
3412         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3413 #endif
3414         flushRegisters();
3415         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3416         m_jit.exceptionCheck();
3417
3418         jsValueResult(resultRegs, node);
3419         return;
3420     }
3421
3422     std::optional<JSValueOperand> left;
3423     std::optional<JSValueOperand> right;
3424
3425     JSValueRegs leftRegs;
3426     JSValueRegs rightRegs;
3427
3428     FPRTemporary leftNumber(this);
3429     FPRReg leftFPR = leftNumber.fpr();
3430
3431 #if USE(JSVALUE64)
3432     GPRTemporary result(this);
3433     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3434     GPRTemporary scratch(this);
3435     GPRReg scratchGPR = scratch.gpr();
3436     FPRReg scratchFPR = InvalidFPRReg;
3437 #else
3438     GPRTemporary resultTag(this);
3439     GPRTemporary resultPayload(this);
3440     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3441     GPRReg scratchGPR = resultTag.gpr();
3442     FPRTemporary fprScratch(this);
3443     FPRReg scratchFPR = fprScratch.fpr();
3444 #endif
3445
3446     SnippetOperand leftOperand;
3447     SnippetOperand rightOperand;
3448
3449     // The snippet generator does not support both operands being constant. If the left
3450     // operand is already const, we'll ignore the right operand's constness.
3451     if (leftChild->isInt32Constant())
3452         leftOperand.setConstInt32(leftChild->asInt32());
3453     else if (rightChild->isInt32Constant())
3454         rightOperand.setConstInt32(rightChild->asInt32());
3455
3456     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3457
3458     if (!leftOperand.isConst()) {
3459         left.emplace(this, leftChild);
3460         leftRegs = left->jsValueRegs();
3461     }
3462     if (!rightOperand.isConst()) {
3463         right.emplace(this, rightChild);
3464         rightRegs = right->jsValueRegs();
3465     }
3466
3467     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3468         leftFPR, scratchGPR, scratchFPR, shiftType);
3469     gen.generateFastPath(m_jit);
3470
3471     ASSERT(gen.didEmitFastPath());
3472     gen.endJumpList().append(m_jit.jump());
3473
3474     gen.slowPathJumpList().link(&m_jit);
3475     silentSpillAllRegisters(resultRegs);
3476
3477     if (leftOperand.isConst()) {
3478         leftRegs = resultRegs;
3479         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3480     } else if (rightOperand.isConst()) {
3481         rightRegs = resultRegs;
3482         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3483     }
3484
3485     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3486
3487     silentFillAllRegisters(resultRegs);
3488     m_jit.exceptionCheck();
3489
3490     gen.endJumpList().link(&m_jit);
3491     jsValueResult(resultRegs, node);
3492     return;
3493 }
3494
3495 void SpeculativeJIT::compileShiftOp(Node* node)
3496 {
3497     NodeType op = node->op();
3498     Edge& leftChild = node->child1();
3499     Edge& rightChild = node->child2();
3500
3501     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3502         switch (op) {
3503         case BitLShift:
3504             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3505             return;
3506         case BitRShift:
3507         case BitURShift:
3508             emitUntypedRightShiftBitOp(node);
3509             return;
3510         default:
3511             RELEASE_ASSERT_NOT_REACHED();
3512         }
3513     }
3514
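    // JS shift counts are taken modulo 32, hence the `& 0x1f` applied to the constant
    // amount below (e.g. x << 33 behaves like x << 1).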
3515     if (rightChild->isInt32Constant()) {
3516         SpeculateInt32Operand op1(this, leftChild);
3517         GPRTemporary result(this, Reuse, op1);
3518
3519         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3520
3521         int32Result(result.gpr(), node);
3522     } else {
3523         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3524         SpeculateInt32Operand op1(this, leftChild);
3525         SpeculateInt32Operand op2(this, rightChild);
3526         GPRTemporary result(this, Reuse, op1);
3527
3528         GPRReg reg1 = op1.gpr();
3529         GPRReg reg2 = op2.gpr();
3530         shiftOp(op, reg1, reg2, result.gpr());
3531
3532         int32Result(result.gpr(), node);
3533     }
3534 }
3535
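// ValueAdd: if either operand is known not to be a number we call
// operationValueAddNotNumber directly (the result may be a string, e.g. 1 + "2" is "12").
// Otherwise we build a JITAddIC seeded with the baseline ArithProfile for this bytecode
// and let compileMathIC() emit the inline fast path plus a repatchable slow path.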
3536 void SpeculativeJIT::compileValueAdd(Node* node)
3537 {
3538     Edge& leftChild = node->child1();
3539     Edge& rightChild = node->child2();
3540
3541     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3542         JSValueOperand left(this, leftChild);
3543         JSValueOperand right(this, rightChild);
3544         JSValueRegs leftRegs = left.jsValueRegs();
3545         JSValueRegs rightRegs = right.jsValueRegs();
3546 #if USE(JSVALUE64)
3547         GPRTemporary result(this);
3548         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3549 #else
3550         GPRTemporary resultTag(this);
3551         GPRTemporary resultPayload(this);
3552         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3553 #endif
3554         flushRegisters();
3555         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3556         m_jit.exceptionCheck();
3557     
3558         jsValueResult(resultRegs, node);
3559         return;
3560     }
3561
3562 #if USE(JSVALUE64)
3563     bool needsScratchGPRReg = true;
3564     bool needsScratchFPRReg = false;
3565 #else
3566     bool needsScratchGPRReg = true;
3567     bool needsScratchFPRReg = true;
3568 #endif
3569
3570     ArithProfile* arithProfile = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
3571     JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile);
3572     auto repatchingFunction = operationValueAddOptimize;
3573     auto nonRepatchingFunction = operationValueAdd;
3574     
3575     compileMathIC(node, addIC, needsScratchGPRReg, needsScratchFPRReg, repatchingFunction, nonRepatchingFunction);
3576 }
3577
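// Common driver for the binary math ICs. We first try to generate the IC inline; if that
// succeeds, the IC's slow-path jumps get a generator that silently spills, calls either
// the repatching or the non-repatching operation (depending on whether the IC asked to
// repatch), refills, and finalizes the inline code in a link task. If inline generation
// fails, we simply flush and call the non-repatching operation.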
3578 template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
3579 void SpeculativeJIT::compileMathIC(Node* node, JITBinaryMathIC<Generator>* mathIC, bool needsScratchGPRReg, bool needsScratchFPRReg, RepatchingFunction repatchingFunction, NonRepatchingFunction nonRepatchingFunction)
3580 {
3581     Edge& leftChild = node->child1();
3582     Edge& rightChild = node->child2();
3583
3584     std::optional<JSValueOperand> left;
3585     std::optional<JSValueOperand> right;
3586
3587     JSValueRegs leftRegs;
3588     JSValueRegs rightRegs;
3589
3590     FPRTemporary leftNumber(this);
3591     FPRTemporary rightNumber(this);
3592     FPRReg leftFPR = leftNumber.fpr();
3593     FPRReg rightFPR = rightNumber.fpr();
3594
3595     GPRReg scratchGPR = InvalidGPRReg;
3596     FPRReg scratchFPR = InvalidFPRReg;
3597
3598     std::optional<FPRTemporary> fprScratch;
3599     if (needsScratchFPRReg) {
3600         fprScratch.emplace(this);
3601         scratchFPR = fprScratch->fpr();
3602     }
3603
3604 #if USE(JSVALUE64)
3605     std::optional<GPRTemporary> gprScratch;
3606     if (needsScratchGPRReg) {
3607         gprScratch.emplace(this);
3608         scratchGPR = gprScratch->gpr();
3609     }
3610     GPRTemporary result(this);
3611     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3612 #else
3613     GPRTemporary resultTag(this);
3614     GPRTemporary resultPayload(this);
3615     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3616     if (needsScratchGPRReg)
3617         scratchGPR = resultRegs.tagGPR();
3618 #endif
3619
3620     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3621     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3622
3623     // The snippet generator does not support both operands being constant. If the left
3624     // operand is already const, we'll ignore the right operand's constness.
3625     if (leftChild->isInt32Constant())
3626         leftOperand.setConstInt32(leftChild->asInt32());
3627     else if (rightChild->isInt32Constant())
3628         rightOperand.setConstInt32(rightChild->asInt32());
3629
3630     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3631     ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));
3632
3633     if (!Generator::isLeftOperandValidConstant(leftOperand)) {
3634         left.emplace(this, leftChild);
3635         leftRegs = left->jsValueRegs();
3636     }
3637     if (!Generator::isRightOperandValidConstant(rightOperand)) {
3638         right.emplace(this, rightChild);
3639         rightRegs = right->jsValueRegs();
3640     }
3641
3642 #if ENABLE(MATH_IC_STATS)
3643     auto inlineStart = m_jit.label();
3644 #endif
3645
3646     Box<MathICGenerationState> addICGenerationState = Box<MathICGenerationState>::create();
3647     mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, leftFPR, rightFPR, scratchGPR, scratchFPR);
3648
3649     bool shouldEmitProfiling = false;
3650     bool generatedInline = mathIC->generateInline(m_jit, *addICGenerationState, shouldEmitProfiling);
3651
3652     if (generatedInline) {
3653         ASSERT(!addICGenerationState->slowPathJumps.empty());
3654
3655         Vector<SilentRegisterSavePlan> savePlans;
3656         silentSpillAllRegistersImpl(false, savePlans, resultRegs);
3657
3658         auto done = m_jit.label();
3659
3660         addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
3661             addICGenerationState->slowPathJumps.link(&m_jit);
3662             addICGenerationState->slowPathStart = m_jit.label();
3663 #if ENABLE(MATH_IC_STATS)
3664             auto slowPathStart = m_jit.label();
3665 #endif
3666
3667             silentSpill(savePlans);
3668
3669             auto innerLeftRegs = leftRegs;
3670             auto innerRightRegs = rightRegs;
3671             if (Generator::isLeftOperandValidConstant(leftOperand)) {
3672                 innerLeftRegs = resultRegs;
3673                 m_jit.moveValue(leftChild->asJSValue(), innerLeftRegs);
3674             } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3675                 innerRightRegs = resultRegs;
3676                 m_jit.moveValue(rightChild->asJSValue(), innerRightRegs);
3677             }
3678
3679             if (addICGenerationState->shouldSlowPathRepatch)
3680                 addICGenerationState->slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJMic>(repatchingFunction), resultRegs, innerLeftRegs, innerRightRegs, TrustedImmPtr(mathIC));
3681             else
3682                 addICGenerationState->slowPathCall = callOperation(nonRepatchingFunction, resultRegs, innerLeftRegs, innerRightRegs);
3683
3684             silentFill(savePlans);
3685             m_jit.exceptionCheck();
3686             m_jit.jump().linkTo(done, &m_jit);
3687
3688             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3689                 mathIC->finalizeInlineCode(*addICGenerationState, linkBuffer);
3690             });
3691
3692 #if ENABLE(MATH_IC_STATS)
3693             auto slowPathEnd = m_jit.label();
3694             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3695                 size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
3696                 mathIC->m_generatedCodeSize += size;
3697             });
3698 #endif
3699
3700         });
3701     } else {
3702         if (Generator::isLeftOperandValidConstant(leftOperand)) {
3703             left.emplace(this, leftChild);
3704             leftRegs = left->jsValueRegs();
3705         } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3706             right.emplace(this, rightChild);
3707             rightRegs = right->jsValueRegs();
3708         }
3709
3710         flushRegisters();
3711         callOperation(nonRepatchingFunction, resultRegs, leftRegs, rightRegs);
3712         m_jit.exceptionCheck();
3713     }
3714
3715 #if ENABLE(MATH_IC_STATS)
3716     auto inlineEnd = m_jit.label();
3717     m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3718         size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
3719         mathIC->m_generatedCodeSize += size;
3720     });
3721 #endif
3722
3723     jsValueResult(resultRegs, node);
3724     return;
3725 }
3726
3727 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3728 {
3729     // We could do something smarter here, but this case is currently super rare and, unless
3730     // Symbol.hasInstance becomes popular, will likely remain that way.
3731
3732     JSValueOperand value(this, node->child1());
3733     SpeculateCellOperand constructor(this, node->child2());
3734     JSValueOperand hasInstanceValue(this, node->child3());
3735     GPRTemporary result(this);
3736
3737     JSValueRegs valueRegs = value.jsValueRegs();
3738     GPRReg constructorGPR = constructor.gpr();
3739     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3740     GPRReg resultGPR = result.gpr();
3741
3742     MacroAssembler::Jump slowCase = m_jit.jump();
3743
3744     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3745
3746     unblessedBooleanResult(resultGPR, node);
3747 }
3748
3749 void SpeculativeJIT::compileIsCellWithType(Node* node)
3750 {
3751     switch (node->child1().useKind()) {
3752     case UntypedUse: {
3753         JSValueOperand value(this, node->child1());
3754 #if USE(JSVALUE64)
3755         GPRTemporary result(this, Reuse, value);
3756 #else
3757         GPRTemporary result(this, Reuse, value, PayloadWord);
3758 #endif
3759
3760         JSValueRegs valueRegs = value.jsValueRegs();
3761         GPRReg resultGPR = result.gpr();
3762
3763         JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3764
3765         m_jit.compare8(JITCompiler::Equal,
3766             JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3767             TrustedImm32(node->queriedType()),
3768             resultGPR);
3769         blessBoolean(resultGPR);
3770         JITCompiler::Jump done = m_jit.jump();
3771
3772         isNotCell.link(&m_jit);
3773         moveFalseTo(resultGPR);
3774
3775         done.link(&m_jit);
3776         blessedBooleanResult(resultGPR, node);
3777         return;
3778     }
3779
3780     case CellUse: {
3781         SpeculateCellOperand cell(this, node->child1());
3782         GPRTemporary result(this, Reuse, cell);
3783
3784         GPRReg cellGPR = cell.gpr();
3785         GPRReg resultGPR = result.gpr();
3786
3787         m_jit.compare8(JITCompiler::Equal,
3788             JITCompiler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
3789             TrustedImm32(node->queriedType()),
3790             resultGPR);
3791         blessBoolean(resultGPR);
3792         blessedBooleanResult(resultGPR, node);
3793         return;
3794     }
3795
3796     default:
3797         RELEASE_ASSERT_NOT_REACHED();
3798         break;
3799     }
3800 }
3801
3802 void SpeculativeJIT::compileIsTypedArrayView(Node* node)
3803 {
3804     JSValueOperand value(this, node->child1());
3805 #if USE(JSVALUE64)
3806     GPRTemporary result(this, Reuse, value);
3807 #else
3808     GPRTemporary result(this, Reuse, value, PayloadWord);
3809 #endif
3810
3811     JSValueRegs valueRegs = value.jsValueRegs();
3812     GPRReg resultGPR = result.gpr();
3813
3814     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3815
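    // Single-compare range check: subtract Int8ArrayType so that every typed array view type
    // lands in [0, Float64ArrayType - Int8ArrayType], then answer with one unsigned
    // BelowOrEqual comparison. This relies on the typed array types being contiguous in the
    // JSType enum.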
3816     m_jit.load8(JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()), resultGPR);
3817     m_jit.sub32(TrustedImm32(Int8ArrayType), resultGPR);
3818     m_jit.compare32(JITCompiler::BelowOrEqual,
3819         resultGPR,
3820         TrustedImm32(Float64ArrayType - Int8ArrayType),
3821         resultGPR);
3822     blessBoolean(resultGPR);
3823     JITCompiler::Jump done = m_jit.jump();
3824
3825     isNotCell.link(&m_jit);
3826     moveFalseTo(resultGPR);
3827
3828     done.link(&m_jit);
3829     blessedBooleanResult(resultGPR, node);
3830 }
3831
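// Fast path for calling the Object constructor on a value: if the value is already an
// object it is returned unchanged; any other input falls back to operationObjectConstructor
// (e.g. Object(5) yields a Number wrapper object and Object(null) yields a fresh empty
// object).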
3832 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3833 {
3834     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3835     JSValueOperand value(this, node->child1());
3836 #if USE(JSVALUE64)
3837     GPRTemporary result(this, Reuse, value);
3838 #else
3839     GPRTemporary result(this, Reuse, value, PayloadWord);
3840 #endif
3841
3842     JSValueRegs valueRegs = value.jsValueRegs();
3843     GPRReg resultGPR = result.gpr();
3844
3845     MacroAssembler::JumpList slowCases;
3846     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3847     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3848     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3849
3850     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3851     cellResult(resultGPR, node);
3852 }
3853
3854 void SpeculativeJIT::compileArithAdd(Node* node)
3855 {
3856     switch (node->binaryUseKind()) {
3857     case Int32Use: {
3858         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3859
3860         if (node->child2()->isInt32Constant()) {
3861             SpeculateInt32Operand op1(this, node->child1());
3862             GPRTemporary result(this, Reuse, op1);
3863
3864             GPRReg gpr1 = op1.gpr();
3865             int32_t imm2 = node->child2()->asInt32();
3866             GPRReg gprResult = result.gpr();
3867
3868             if (!shouldCheckOverflow(node->arithMode())) {
3869                 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3870                 int32Result(gprResult, node);
3871                 return;
3872             }
3873
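            // When the result register aliases op1, the overflowing add has clobbered the
            // operand, so the SpeculationRecovery tells OSR exit how to undo it (subtract
            // imm2 from gpr1) before exiting.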
3874             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
3875             if (gpr1 == gprResult) {
3876                 speculationCheck(Overflow, JSValueRegs(), 0, check,
3877                     SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
3878             } else
3879                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3880
3881             int32Result(gprResult, node);
3882             return;
3883         }
3884                 
3885         SpeculateInt32Operand op1(this, node->child1());
3886         SpeculateInt32Operand op2(this, node->child2());
3887         GPRTemporary result(this, Reuse, op1, op2);
3888
3889         GPRReg gpr1 = op1.gpr();
3890         GPRReg gpr2 = op2.gpr();
3891         GPRReg gprResult = result.gpr();
3892
3893