Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp (WebKit, as of "Introduce a VM Traps mechanism and refactor Watchdog to use it.")
1 /*
2  * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGCapabilities.h"
37 #include "DFGDOMJITPatchpointParams.h"
38 #include "DFGMayExit.h"
39 #include "DFGOSRExitFuzz.h"
40 #include "DFGSaneStringGetByValSlowPathGenerator.h"
41 #include "DFGSlowPathGenerator.h"
42 #include "DOMJITPatchpoint.h"
43 #include "DirectArguments.h"
44 #include "JITAddGenerator.h"
45 #include "JITBitAndGenerator.h"
46 #include "JITBitOrGenerator.h"
47 #include "JITBitXorGenerator.h"
48 #include "JITDivGenerator.h"
49 #include "JITLeftShiftGenerator.h"
50 #include "JITMulGenerator.h"
51 #include "JITRightShiftGenerator.h"
52 #include "JITSubGenerator.h"
53 #include "JSAsyncFunction.h"
54 #include "JSCInlines.h"
55 #include "JSEnvironmentRecord.h"
56 #include "JSFixedArray.h"
57 #include "JSGeneratorFunction.h"
58 #include "JSLexicalEnvironment.h"
59 #include "LinkBuffer.h"
60 #include "RegExpConstructor.h"
61 #include "ScopedArguments.h"
62 #include "ScratchRegisterAllocator.h"
63 #include <wtf/BitVector.h>
64 #include <wtf/Box.h>
65 #include <wtf/MathExtras.h>
66
67 namespace JSC { namespace DFG {
68
69 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
70     : m_compileOkay(true)
71     , m_jit(jit)
72     , m_currentNode(0)
73     , m_lastGeneratedNode(LastNodeType)
74     , m_indexInBlock(0)
75     , m_generationInfo(m_jit.graph().frameRegisterCount())
76     , m_state(m_jit.graph())
77     , m_interpreter(m_jit.graph(), m_state)
78     , m_stream(&jit.jitCode()->variableEventStream)
79     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
80 {
81 }
82
83 SpeculativeJIT::~SpeculativeJIT()
84 {
85 }
86
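// Inline-allocates a JSFinalObject together with its butterfly: the out-of-line property
// storage plus, when the structure has indexed properties, an indexing header and vector.
// Both allocations come from the appropriate MarkedAllocator free lists; anything that cannot
// be satisfied inline is handled by the CallArrayAllocatorSlowPathGenerator registered below.
// Unused vector slots are pre-filled with PNaN for double arrays and the empty value otherwise.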
87 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
88 {
89     IndexingType indexingType = structure->indexingType();
90     bool hasIndexingHeader = hasIndexedProperties(indexingType);
91
92     unsigned inlineCapacity = structure->inlineCapacity();
93     unsigned outOfLineCapacity = structure->outOfLineCapacity();
94     
95     GPRTemporary scratch(this);
96     GPRTemporary scratch2(this);
97     GPRReg scratchGPR = scratch.gpr();
98     GPRReg scratch2GPR = scratch2.gpr();
99
100     ASSERT(vectorLength >= numElements);
101     vectorLength = Butterfly::optimalContiguousVectorLength(structure.get(), vectorLength);
102     
103     JITCompiler::JumpList slowCases;
104
105     size_t size = 0;
106     if (hasIndexingHeader)
107         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
108     size += outOfLineCapacity * sizeof(JSValue);
109
110     m_jit.move(TrustedImmPtr(0), storageGPR);
111     
112     if (size) {
113         if (MarkedAllocator* allocator = m_jit.vm()->auxiliarySpace.allocatorFor(size)) {
114             m_jit.move(TrustedImmPtr(allocator), scratchGPR);
115             m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);
116             
117             m_jit.addPtr(
118                 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
119                 storageGPR);
120             
121             if (hasIndexingHeader)
122                 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
123         } else
124             slowCases.append(m_jit.jump());
125     }
126
127     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
128     MarkedAllocator* allocatorPtr = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorFor(allocationSize);
129     if (allocatorPtr) {
130         m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
131         emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
132         m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
133     } else
134         slowCases.append(m_jit.jump());
135
136     // I want a slow path that also loads out the storage pointer, and that's
137     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
138     // of work for a very small piece of functionality. :-/
139     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
140         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
141         structure, vectorLength));
142
143     if (numElements < vectorLength) {
144 #if USE(JSVALUE64)
145         if (hasDouble(structure->indexingType()))
146             m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
147         else
148             m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
149         for (unsigned i = numElements; i < vectorLength; ++i)
150             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
151 #else
152         EncodedValueDescriptor value;
153         if (hasDouble(structure->indexingType()))
154             value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
155         else
156             value.asInt64 = JSValue::encode(JSValue());
157         for (unsigned i = numElements; i < vectorLength; ++i) {
158             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
159             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
160         }
161 #endif
162     }
163     
164     if (hasIndexingHeader)
165         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
166     
167     m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());
168     
169     m_jit.mutatorFence();
170 }
171
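// Materializes the argument count for the given inline call frame (or the machine frame when
// inlineCallFrame is null). For non-varargs inline frames the count is a compile-time constant;
// otherwise it is loaded from the argument-count slot, minus one if |this| should be excluded.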
172 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
173 {
174     if (inlineCallFrame && !inlineCallFrame->isVarargs())
175         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
176     else {
177         VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
178         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
179         if (!includeThis)
180             m_jit.sub32(TrustedImm32(1), lengthGPR);
181     }
182 }
183
184 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
185 {
186     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
187 }
188
189 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
190 {
191     if (origin.inlineCallFrame) {
192         if (origin.inlineCallFrame->isClosureCall) {
193             m_jit.loadPtr(
194                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
195                 calleeGPR);
196         } else {
197             m_jit.move(
198                 TrustedImmPtr::weakPointer(m_jit.graph(), origin.inlineCallFrame->calleeRecovery.constant().asCell()),
199                 calleeGPR);
200         }
201     } else
202         m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
203 }
204
205 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
206 {
207     m_jit.addPtr(
208         TrustedImm32(
209             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
210         GPRInfo::callFrameRegister, startGPR);
211 }
212
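// OSR exit fuzzing: bumps the global g_numberOfOSRExitFuzzChecks counter and, once it reaches
// the configured fireOSRExitFuzzAt / fireOSRExitFuzzAtOrAfter threshold, returns a jump that
// forces the exit to be taken. Returns an unset Jump when fuzzing is disabled for this code block.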
213 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
214 {
215     if (!Options::useOSRExitFuzz()
216         || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
217         || !doOSRExitFuzzing())
218         return MacroAssembler::Jump();
219     
220     MacroAssembler::Jump result;
221     
222     m_jit.pushToSave(GPRInfo::regT0);
223     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
224     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
225     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
226     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
227     unsigned at = Options::fireOSRExitFuzzAt();
228     if (at || atOrAfter) {
229         unsigned threshold;
230         MacroAssembler::RelationalCondition condition;
231         if (atOrAfter) {
232             threshold = atOrAfter;
233             condition = MacroAssembler::Below;
234         } else {
235             threshold = at;
236             condition = MacroAssembler::NotEqual;
237         }
238         MacroAssembler::Jump ok = m_jit.branch32(
239             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
240         m_jit.popToRestore(GPRInfo::regT0);
241         result = m_jit.jump();
242         ok.link(&m_jit);
243     }
244     m_jit.popToRestore(GPRInfo::regT0);
245     
246     return result;
247 }
248
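// The speculationCheck() family registers an OSR exit for the given jump(s). Each exit records
// the exit kind, a way of getting a value profile for the checked value, and the current
// position in the variable event stream, which the OSR exit compiler uses to reconstruct
// bytecode state. When OSR exit fuzzing is active, the fuzz jump is folded into the same exit.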
249 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
250 {
251     if (!m_compileOkay)
252         return;
253     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
254     if (fuzzJump.isSet()) {
255         JITCompiler::JumpList jumpsToFail;
256         jumpsToFail.append(fuzzJump);
257         jumpsToFail.append(jumpToFail);
258         m_jit.appendExitInfo(jumpsToFail);
259     } else
260         m_jit.appendExitInfo(jumpToFail);
261     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
262 }
263
264 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
265 {
266     if (!m_compileOkay)
267         return;
268     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
269     if (fuzzJump.isSet()) {
270         JITCompiler::JumpList myJumpsToFail;
271         myJumpsToFail.append(jumpsToFail);
272         myJumpsToFail.append(fuzzJump);
273         m_jit.appendExitInfo(myJumpsToFail);
274     } else
275         m_jit.appendExitInfo(jumpsToFail);
276     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
277 }
278
279 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
280 {
281     if (!m_compileOkay)
282         return OSRExitJumpPlaceholder();
283     unsigned index = m_jit.jitCode()->osrExit.size();
284     m_jit.appendExitInfo();
285     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
286     return OSRExitJumpPlaceholder(index);
287 }
288
289 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
290 {
291     return speculationCheck(kind, jsValueSource, nodeUse.node());
292 }
293
294 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
295 {
296     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
297 }
298
299 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
300 {
301     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
302 }
303
304 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
305 {
306     if (!m_compileOkay)
307         return;
308     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
309     m_jit.appendExitInfo(jumpToFail);
310     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
311 }
312
313 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
314 {
315     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
316 }
317
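// An invalidation point is an OSR exit with no branch of its own: it records a label
// (m_replacementSource) at which a jump can later be patched in when a watchpoint fires and
// this code block is invalidated.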
318 void SpeculativeJIT::emitInvalidationPoint(Node* node)
319 {
320     if (!m_compileOkay)
321         return;
322     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
323     m_jit.jitCode()->appendOSRExit(OSRExit(
324         UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
325         this, m_stream->size()));
326     info.m_replacementSource = m_jit.watchpointLabel();
327     ASSERT(info.m_replacementSource.isSet());
328     noResult(node);
329 }
330
331 void SpeculativeJIT::unreachable(Node* node)
332 {
333     m_compileOkay = false;
334     m_jit.abortWithReason(DFGUnreachableNode, node->op());
335 }
336
337 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
338 {
339     if (!m_compileOkay)
340         return;
341     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
342     m_compileOkay = false;
343     if (verboseCompilationEnabled())
344         dataLog("Bailing compilation.\n");
345 }
346
347 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
348 {
349     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
350 }
351
352 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
353 {
354     ASSERT(needsTypeCheck(edge, typesPassedThrough));
355     m_interpreter.filter(edge, typesPassedThrough);
356     speculationCheck(exitKind, source, edge.node(), jumpToFail);
357 }
358
359 RegisterSet SpeculativeJIT::usedRegisters()
360 {
361     RegisterSet result;
362     
363     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
364         GPRReg gpr = GPRInfo::toRegister(i);
365         if (m_gprs.isInUse(gpr))
366             result.set(gpr);
367     }
368     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
369         FPRReg fpr = FPRInfo::toRegister(i);
370         if (m_fprs.isInUse(fpr))
371             result.set(fpr);
372     }
373     
374     result.merge(RegisterSet::stubUnavailableRegisters());
375     
376     return result;
377 }
378
379 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
380 {
381     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
382 }
383
384 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
385 {
386     m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
387 }
388
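// Slow paths are emitted out of line, after the main code, so the fast paths stay compact.
// Lambda-based slow paths restore the node and event-stream index they were registered with
// before emitting their code.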
389 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
390 {
391     for (auto& slowPathGenerator : m_slowPathGenerators) {
392         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
393         slowPathGenerator->generate(this);
394     }
395     for (auto& slowPathLambda : m_slowPathLambdas) {
396         Node* currentNode = slowPathLambda.currentNode;
397         m_currentNode = currentNode;
398         m_outOfLineStreamIndex = slowPathLambda.streamIndex;
399         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
400         slowPathLambda.generator();
401         m_outOfLineStreamIndex = std::nullopt;
402     }
403 }
404
405 void SpeculativeJIT::clearGenerationInfo()
406 {
407     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
408         m_generationInfo[i] = GenerationInfo();
409     m_gprs = RegisterBank<GPRInfo>();
410     m_fprs = RegisterBank<FPRInfo>();
411 }
412
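// Silent spill/fill plans describe how to save a live register around a call and how to restore
// it afterwards without updating the recorded register state (hence "silent"). The plan depends
// on the value's DataFormat: constants are simply re-materialized on fill, values that are
// already spilled need no store, and Int52 values may need to be shifted between the boxed and
// strict Int52 representations.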
413 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
414 {
415     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
416     Node* node = info.node();
417     DataFormat registerFormat = info.registerFormat();
418     ASSERT(registerFormat != DataFormatNone);
419     ASSERT(registerFormat != DataFormatDouble);
420         
421     SilentSpillAction spillAction;
422     SilentFillAction fillAction;
423         
424     if (!info.needsSpill())
425         spillAction = DoNothingForSpill;
426     else {
427 #if USE(JSVALUE64)
428         ASSERT(info.gpr() == source);
429         if (registerFormat == DataFormatInt32)
430             spillAction = Store32Payload;
431         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
432             spillAction = StorePtr;
433         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
434             spillAction = Store64;
435         else {
436             ASSERT(registerFormat & DataFormatJS);
437             spillAction = Store64;
438         }
439 #elif USE(JSVALUE32_64)
440         if (registerFormat & DataFormatJS) {
441             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
442             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
443         } else {
444             ASSERT(info.gpr() == source);
445             spillAction = Store32Payload;
446         }
447 #endif
448     }
449         
450     if (registerFormat == DataFormatInt32) {
451         ASSERT(info.gpr() == source);
452         ASSERT(isJSInt32(info.registerFormat()));
453         if (node->hasConstant()) {
454             ASSERT(node->isInt32Constant());
455             fillAction = SetInt32Constant;
456         } else
457             fillAction = Load32Payload;
458     } else if (registerFormat == DataFormatBoolean) {
459 #if USE(JSVALUE64)
460         RELEASE_ASSERT_NOT_REACHED();
461 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
462         fillAction = DoNothingForFill;
463 #endif
464 #elif USE(JSVALUE32_64)
465         ASSERT(info.gpr() == source);
466         if (node->hasConstant()) {
467             ASSERT(node->isBooleanConstant());
468             fillAction = SetBooleanConstant;
469         } else
470             fillAction = Load32Payload;
471 #endif
472     } else if (registerFormat == DataFormatCell) {
473         ASSERT(info.gpr() == source);
474         if (node->hasConstant()) {
475             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
476             node->asCell(); // To get the assertion.
477             fillAction = SetCellConstant;
478         } else {
479 #if USE(JSVALUE64)
480             fillAction = LoadPtr;
481 #else
482             fillAction = Load32Payload;
483 #endif
484         }
485     } else if (registerFormat == DataFormatStorage) {
486         ASSERT(info.gpr() == source);
487         fillAction = LoadPtr;
488     } else if (registerFormat == DataFormatInt52) {
489         if (node->hasConstant())
490             fillAction = SetInt52Constant;
491         else if (info.spillFormat() == DataFormatInt52)
492             fillAction = Load64;
493         else if (info.spillFormat() == DataFormatStrictInt52)
494             fillAction = Load64ShiftInt52Left;
495         else if (info.spillFormat() == DataFormatNone)
496             fillAction = Load64;
497         else {
498             RELEASE_ASSERT_NOT_REACHED();
499 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
500             fillAction = Load64; // Make GCC happy.
501 #endif
502         }
503     } else if (registerFormat == DataFormatStrictInt52) {
504         if (node->hasConstant())
505             fillAction = SetStrictInt52Constant;
506         else if (info.spillFormat() == DataFormatInt52)
507             fillAction = Load64ShiftInt52Right;
508         else if (info.spillFormat() == DataFormatStrictInt52)
509             fillAction = Load64;
510         else if (info.spillFormat() == DataFormatNone)
511             fillAction = Load64;
512         else {
513             RELEASE_ASSERT_NOT_REACHED();
514 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
515             fillAction = Load64; // Make GCC happy.
516 #endif
517         }
518     } else {
519         ASSERT(registerFormat & DataFormatJS);
520 #if USE(JSVALUE64)
521         ASSERT(info.gpr() == source);
522         if (node->hasConstant()) {
523             if (node->isCellConstant())
524                 fillAction = SetTrustedJSConstant;
525             else
526                 fillAction = SetJSConstant;
527         } else if (info.spillFormat() == DataFormatInt32) {
528             ASSERT(registerFormat == DataFormatJSInt32);
529             fillAction = Load32PayloadBoxInt;
530         } else
531             fillAction = Load64;
532 #else
533         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
534         if (node->hasConstant())
535             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
536         else if (info.payloadGPR() == source)
537             fillAction = Load32Payload;
538         else { // Fill the Tag
539             switch (info.spillFormat()) {
540             case DataFormatInt32:
541                 ASSERT(registerFormat == DataFormatJSInt32);
542                 fillAction = SetInt32Tag;
543                 break;
544             case DataFormatCell:
545                 ASSERT(registerFormat == DataFormatJSCell);
546                 fillAction = SetCellTag;
547                 break;
548             case DataFormatBoolean:
549                 ASSERT(registerFormat == DataFormatJSBoolean);
550                 fillAction = SetBooleanTag;
551                 break;
552             default:
553                 fillAction = Load32Tag;
554                 break;
555             }
556         }
557 #endif
558     }
559         
560     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
561 }
562     
563 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
564 {
565     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
566     Node* node = info.node();
567     ASSERT(info.registerFormat() == DataFormatDouble);
568
569     SilentSpillAction spillAction;
570     SilentFillAction fillAction;
571         
572     if (!info.needsSpill())
573         spillAction = DoNothingForSpill;
574     else {
575         ASSERT(!node->hasConstant());
576         ASSERT(info.spillFormat() == DataFormatNone);
577         ASSERT(info.fpr() == source);
578         spillAction = StoreDouble;
579     }
580         
581 #if USE(JSVALUE64)
582     if (node->hasConstant()) {
583         node->asNumber(); // To get the assertion.
584         fillAction = SetDoubleConstant;
585     } else {
586         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
587         fillAction = LoadDouble;
588     }
589 #elif USE(JSVALUE32_64)
590     ASSERT(info.registerFormat() == DataFormatDouble);
591     if (node->hasConstant()) {
592         node->asNumber(); // To get the assertion.
593         fillAction = SetDoubleConstant;
594     } else
595         fillAction = LoadDouble;
596 #endif
597
598     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
599 }
600     
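// silentSpill() executes the spill half of a SilentRegisterSavePlan, storing the register to the
// node's stack slot in whatever width the plan selected; silentFill() below performs the matching
// restore, using canTrample as a scratch register when re-materializing 64-bit double constants.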
601 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
602 {
603     switch (plan.spillAction()) {
604     case DoNothingForSpill:
605         break;
606     case Store32Tag:
607         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
608         break;
609     case Store32Payload:
610         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
611         break;
612     case StorePtr:
613         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
614         break;
615 #if USE(JSVALUE64)
616     case Store64:
617         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
618         break;
619 #endif
620     case StoreDouble:
621         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
622         break;
623     default:
624         RELEASE_ASSERT_NOT_REACHED();
625     }
626 }
627     
628 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
629 {
630 #if USE(JSVALUE32_64)
631     UNUSED_PARAM(canTrample);
632 #endif
633     switch (plan.fillAction()) {
634     case DoNothingForFill:
635         break;
636     case SetInt32Constant:
637         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
638         break;
639 #if USE(JSVALUE64)
640     case SetInt52Constant:
641         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
642         break;
643     case SetStrictInt52Constant:
644         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
645         break;
646 #endif // USE(JSVALUE64)
647     case SetBooleanConstant:
648         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
649         break;
650     case SetCellConstant:
651         ASSERT(plan.node()->constant()->value().isCell());
652         m_jit.move(TrustedImmPtr(plan.node()->constant()), plan.gpr());
653         break;
654 #if USE(JSVALUE64)
655     case SetTrustedJSConstant:
656         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
657         break;
658     case SetJSConstant:
659         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
660         break;
661     case SetDoubleConstant:
662         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
663         m_jit.move64ToDouble(canTrample, plan.fpr());
664         break;
665     case Load32PayloadBoxInt:
666         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
667         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
668         break;
669     case Load32PayloadConvertToInt52:
670         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
671         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
672         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
673         break;
674     case Load32PayloadSignExtend:
675         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
676         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
677         break;
678 #else
679     case SetJSConstantTag:
680         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
681         break;
682     case SetJSConstantPayload:
683         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
684         break;
685     case SetInt32Tag:
686         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
687         break;
688     case SetCellTag:
689         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
690         break;
691     case SetBooleanTag:
692         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
693         break;
694     case SetDoubleConstant:
695         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
696         break;
697 #endif
698     case Load32Tag:
699         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
700         break;
701     case Load32Payload:
702         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
703         break;
704     case LoadPtr:
705         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
706         break;
707 #if USE(JSVALUE64)
708     case Load64:
709         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
710         break;
711     case Load64ShiftInt52Right:
712         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
713         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
714         break;
715     case Load64ShiftInt52Left:
716         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
717         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
718         break;
719 #endif
720     case LoadDouble:
721         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
722         break;
723     default:
724         RELEASE_ASSERT_NOT_REACHED();
725     }
726 }
727     
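// Given the indexing-type byte in tempGPR, emits a branch that is taken when the object's
// indexing shape (and, depending on the ArrayMode's array class, its IsArray bit) does not match
// the shape this ArrayMode expects. For example, for Array::Array with Int32Shape the emitted
// check is roughly:
//     tempGPR &= (IsArray | IndexingShapeMask);
//     branch if tempGPR != (IsArray | Int32Shape)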
728 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
729 {
730     switch (arrayMode.arrayClass()) {
731     case Array::OriginalArray: {
732         CRASH();
733 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
734         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
735         return result;
736 #endif
737     }
738         
739     case Array::Array:
740         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
741         return m_jit.branch32(
742             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
743         
744     case Array::NonArray:
745     case Array::OriginalNonArray:
746         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
747         return m_jit.branch32(
748             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
749         
750     case Array::PossiblyArray:
751         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
752         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
753     }
754     
755     RELEASE_ASSERT_NOT_REACHED();
756     return JITCompiler::Jump();
757 }
758
759 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
760 {
761     JITCompiler::JumpList result;
762     
763     switch (arrayMode.type()) {
764     case Array::Int32:
765         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
766
767     case Array::Double:
768         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
769
770     case Array::Contiguous:
771         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
772
773     case Array::Undecided:
774         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
775
776     case Array::ArrayStorage:
777     case Array::SlowPutArrayStorage: {
778         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
779         
780         if (arrayMode.isJSArray()) {
781             if (arrayMode.isSlowPut()) {
782                 result.append(
783                     m_jit.branchTest32(
784                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
785                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
786                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
787                 result.append(
788                     m_jit.branch32(
789                         MacroAssembler::Above, tempGPR,
790                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
791                 break;
792             }
793             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
794             result.append(
795                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
796             break;
797         }
798         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
799         if (arrayMode.isSlowPut()) {
800             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
801             result.append(
802                 m_jit.branch32(
803                     MacroAssembler::Above, tempGPR,
804                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
805             break;
806         }
807         result.append(
808             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
809         break;
810     }
811     default:
812         CRASH();
813         break;
814     }
815     
816     return result;
817 }
818
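// CheckArray: verifies that the base cell's storage matches the ArrayMode the node was planned
// with, without converting anything (conversions are handled by arrayify() below).
// Indexing-shape modes check the indexing-type byte; DirectArguments, ScopedArguments, and
// typed-array modes check the cell's type instead.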
819 void SpeculativeJIT::checkArray(Node* node)
820 {
821     ASSERT(node->arrayMode().isSpecific());
822     ASSERT(!node->arrayMode().doesConversion());
823     
824     SpeculateCellOperand base(this, node->child1());
825     GPRReg baseReg = base.gpr();
826     
827     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
828         noResult(m_currentNode);
829         return;
830     }
831     
832     const ClassInfo* expectedClassInfo = 0;
833     
834     switch (node->arrayMode().type()) {
835     case Array::AnyTypedArray:
836     case Array::String:
837         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
838         break;
839     case Array::Int32:
840     case Array::Double:
841     case Array::Contiguous:
842     case Array::Undecided:
843     case Array::ArrayStorage:
844     case Array::SlowPutArrayStorage: {
845         GPRTemporary temp(this);
846         GPRReg tempGPR = temp.gpr();
847         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
848         speculationCheck(
849             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
850             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
851         
852         noResult(m_currentNode);
853         return;
854     }
855     case Array::DirectArguments:
856         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
857         noResult(m_currentNode);
858         return;
859     case Array::ScopedArguments:
860         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
861         noResult(m_currentNode);
862         return;
863     default:
864         speculateCellTypeWithoutTypeFiltering(
865             node->child1(), baseReg,
866             typeForTypedArrayType(node->arrayMode().typedArrayType()));
867         noResult(m_currentNode);
868         return;
869     }
870     
871     RELEASE_ASSERT(expectedClassInfo);
872     
873     GPRTemporary temp(this);
874     GPRTemporary temp2(this);
875     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
876     speculationCheck(
877         BadType, JSValueSource::unboxedCell(baseReg), node,
878         m_jit.branchPtr(
879             MacroAssembler::NotEqual,
880             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
881             TrustedImmPtr(expectedClassInfo)));
882     
883     noResult(m_currentNode);
884 }
885
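// Arrayify / ArrayifyToStructure: if the base object does not already have the wanted structure
// or indexing shape, control goes to an ArrayifySlowPathGenerator that converts its storage in
// place.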
886 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
887 {
888     ASSERT(node->arrayMode().doesConversion());
889     
890     GPRTemporary temp(this);
891     GPRTemporary structure;
892     GPRReg tempGPR = temp.gpr();
893     GPRReg structureGPR = InvalidGPRReg;
894     
895     if (node->op() != ArrayifyToStructure) {
896         GPRTemporary realStructure(this);
897         structure.adopt(realStructure);
898         structureGPR = structure.gpr();
899     }
900         
901     // We can skip all that comes next if we already have array storage.
902     MacroAssembler::JumpList slowPath;
903     
904     if (node->op() == ArrayifyToStructure) {
905         slowPath.append(m_jit.branchWeakStructure(
906             JITCompiler::NotEqual,
907             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
908             node->structure()));
909     } else {
910         m_jit.load8(
911             MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
912         
913         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
914     }
915     
916     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
917         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
918     
919     noResult(m_currentNode);
920 }
921
922 void SpeculativeJIT::arrayify(Node* node)
923 {
924     ASSERT(node->arrayMode().isSpecific());
925     
926     SpeculateCellOperand base(this, node->child1());
927     
928     if (!node->child2()) {
929         arrayify(node, base.gpr(), InvalidGPRReg);
930         return;
931     }
932     
933     SpeculateInt32Operand property(this, node->child2());
934     
935     arrayify(node, base.gpr(), property.gpr());
936 }
937
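// Loads the storage pointer for the edge into a GPR, reloading it from the stack if it was
// spilled in DataFormatStorage. Edges in any other format are filled as cells instead.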
938 GPRReg SpeculativeJIT::fillStorage(Edge edge)
939 {
940     VirtualRegister virtualRegister = edge->virtualRegister();
941     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
942     
943     switch (info.registerFormat()) {
944     case DataFormatNone: {
945         if (info.spillFormat() == DataFormatStorage) {
946             GPRReg gpr = allocate();
947             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
948             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
949             info.fillStorage(*m_stream, gpr);
950             return gpr;
951         }
952         
953         // Must be a cell; fill it as a cell and then return the pointer.
954         return fillSpeculateCell(edge);
955     }
956         
957     case DataFormatStorage: {
958         GPRReg gpr = info.gpr();
959         m_gprs.lock(gpr);
960         return gpr;
961     }
962         
963     default:
964         return fillSpeculateCell(edge);
965     }
966 }
967
968 void SpeculativeJIT::useChildren(Node* node)
969 {
970     if (node->flags() & NodeHasVarArgs) {
971         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
972             if (!!m_jit.graph().m_varArgChildren[childIdx])
973                 use(m_jit.graph().m_varArgChildren[childIdx]);
974         }
975     } else {
976         Edge child1 = node->child1();
977         if (!child1) {
978             ASSERT(!node->child2() && !node->child3());
979             return;
980         }
981         use(child1);
982         
983         Edge child2 = node->child2();
984         if (!child2) {
985             ASSERT(!node->child3());
986             return;
987         }
988         use(child2);
989         
990         Edge child3 = node->child3();
991         if (!child3)
992             return;
993         use(child3);
994     }
995 }
996
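// TryGetById: a cached get_by_id using AccessType::TryGet. The CellUse case can omit the
// not-cell check entirely; the UntypedUse case routes non-cell bases to the slow path via the
// notCell jump.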
997 void SpeculativeJIT::compileTryGetById(Node* node)
998 {
999     switch (node->child1().useKind()) {
1000     case CellUse: {
1001         SpeculateCellOperand base(this, node->child1());
1002         JSValueRegsTemporary result(this, Reuse, base);
1003
1004         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
1005         JSValueRegs resultRegs = result.regs();
1006
1007         base.use();
1008
1009         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);
1010
1011         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1012         break;
1013     }
1014
1015     case UntypedUse: {
1016         JSValueOperand base(this, node->child1());
1017         JSValueRegsTemporary result(this, Reuse, base);
1018
1019         JSValueRegs baseRegs = base.jsValueRegs();
1020         JSValueRegs resultRegs = result.regs();
1021
1022         base.use();
1023
1024         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1025
1026         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);
1027
1028         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1029         break;
1030     }
1031
1032     default:
1033         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1034         break;
1035     } 
1036 }
1037
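// Compiles the 'in' operator. When the property is a constant atomic string, a structure stub
// (AccessType::In) is installed so the check can later be patched inline, with
// operationInOptimize as the slow path; otherwise it falls back to the generic
// operationGenericIn call.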
1038 void SpeculativeJIT::compileIn(Node* node)
1039 {
1040     SpeculateCellOperand base(this, node->child1());
1041     GPRReg baseGPR = base.gpr();
1042     
1043     if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(*m_jit.vm())) {
1044         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1045             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1046             
1047             GPRTemporary result(this);
1048             GPRReg resultGPR = result.gpr();
1049
1050             use(node->child2());
1051             
1052             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1053             MacroAssembler::Label done = m_jit.label();
1054             
1055             // This block is executed only when string->tryGetValueImpl() returns an atomic string,
1056             // so the cast to const AtomicStringImpl* below is safe.
1057             auto slowPath = slowPathCall(
1058                 jump.m_jump, this, operationInOptimize,
1059                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1060                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1061             
1062             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1063             stubInfo->codeOrigin = node->origin.semantic;
1064             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1065             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1066 #if USE(JSVALUE32_64)
1067             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1068             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1069 #endif
1070             stubInfo->patch.usedRegisters = usedRegisters();
1071
1072             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1073             addSlowPathGenerator(WTFMove(slowPath));
1074
1075             base.use();
1076
1077             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1078             return;
1079         }
1080     }
1081
1082     JSValueOperand key(this, node->child2());
1083     JSValueRegs regs = key.jsValueRegs();
1084         
1085     GPRFlushedCallResult result(this);
1086     GPRReg resultGPR = result.gpr();
1087         
1088     base.use();
1089     key.use();
1090         
1091     flushRegisters();
1092     callOperation(
1093         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1094         baseGPR, regs);
1095     m_jit.exceptionCheck();
1096     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1097 }
1098
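// delete-by-id and delete-by-val have no fast path here: operands are flushed and the work is
// done by operationDeleteById / operationDeleteByVal, whose boolean result is returned
// unblessed (0 or 1).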
1099 void SpeculativeJIT::compileDeleteById(Node* node)
1100 {
1101     JSValueOperand value(this, node->child1());
1102     GPRFlushedCallResult result(this);
1103
1104     JSValueRegs valueRegs = value.jsValueRegs();
1105     GPRReg resultGPR = result.gpr();
1106
1107     value.use();
1108
1109     flushRegisters();
1110     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1111     m_jit.exceptionCheck();
1112
1113     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1114 }
1115
1116 void SpeculativeJIT::compileDeleteByVal(Node* node)
1117 {
1118     JSValueOperand base(this, node->child1());
1119     JSValueOperand key(this, node->child2());
1120     GPRFlushedCallResult result(this);
1121
1122     JSValueRegs baseRegs = base.jsValueRegs();
1123     JSValueRegs keyRegs = key.jsValueRegs();
1124     GPRReg resultGPR = result.gpr();
1125
1126     base.use();
1127     key.use();
1128
1129     flushRegisters();
1130     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1131     m_jit.exceptionCheck();
1132
1133     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1134 }
1135
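// Non-speculative comparison helpers. If the compare node is immediately consumed by a Branch in
// the same block (detectPeepHoleBranch()), the compare and the branch are fused and the function
// returns true so the caller skips ahead to the branch node; otherwise a boolean result is
// materialized. nonSpeculativeStrictEq() follows the same pattern.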
1136 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1137 {
1138     unsigned branchIndexInBlock = detectPeepHoleBranch();
1139     if (branchIndexInBlock != UINT_MAX) {
1140         Node* branchNode = m_block->at(branchIndexInBlock);
1141
1142         ASSERT(node->adjustedRefCount() == 1);
1143         
1144         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1145     
1146         m_indexInBlock = branchIndexInBlock;
1147         m_currentNode = branchNode;
1148         
1149         return true;
1150     }
1151     
1152     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1153     
1154     return false;
1155 }
1156
1157 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1158 {
1159     unsigned branchIndexInBlock = detectPeepHoleBranch();
1160     if (branchIndexInBlock != UINT_MAX) {
1161         Node* branchNode = m_block->at(branchIndexInBlock);
1162
1163         ASSERT(node->adjustedRefCount() == 1);
1164         
1165         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1166     
1167         m_indexInBlock = branchIndexInBlock;
1168         m_currentNode = branchNode;
1169         
1170         return true;
1171     }
1172     
1173     nonSpeculativeNonPeepholeStrictEq(node, invert);
1174     
1175     return false;
1176 }
1177
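// Debug-only pretty printers: dataFormatString() maps a DataFormat to a short tag, and dump()
// prints the register banks along with the per-virtual-register generation info.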
1178 static const char* dataFormatString(DataFormat format)
1179 {
1180     // These values correspond to the DataFormat enum.
1181     const char* strings[] = {
1182         "[  ]",
1183         "[ i]",
1184         "[ d]",
1185         "[ c]",
1186         "Err!",
1187         "Err!",
1188         "Err!",
1189         "Err!",
1190         "[J ]",
1191         "[Ji]",
1192         "[Jd]",
1193         "[Jc]",
1194         "Err!",
1195         "Err!",
1196         "Err!",
1197         "Err!",
1198     };
1199     return strings[format];
1200 }
1201
1202 void SpeculativeJIT::dump(const char* label)
1203 {
1204     if (label)
1205         dataLogF("<%s>\n", label);
1206
1207     dataLogF("  gprs:\n");
1208     m_gprs.dump();
1209     dataLogF("  fprs:\n");
1210     m_fprs.dump();
1211     dataLogF("  VirtualRegisters:\n");
1212     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1213         GenerationInfo& info = m_generationInfo[i];
1214         if (info.alive())
1215             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1216         else
1217             dataLogF("    % 3d:[__][__]", i);
1218         if (info.registerFormat() == DataFormatDouble)
1219             dataLogF(":fpr%d\n", info.fpr());
1220         else if (info.registerFormat() != DataFormatNone
1221 #if USE(JSVALUE32_64)
1222             && !(info.registerFormat() & DataFormatJS)
1223 #endif
1224             ) {
1225             ASSERT(info.gpr() != InvalidGPRReg);
1226             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1227         } else
1228             dataLogF("\n");
1229     }
1230     if (label)
1231         dataLogF("</%s>\n", label);
1232 }
1233
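// GPRTemporary, FPRTemporary, and JSValueRegsTemporary are RAII-style handles around the
// register allocator. The Reuse constructors hand back one of the operand's registers when the
// operand's node has no further uses, avoiding an extra allocation.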
1234 GPRTemporary::GPRTemporary()
1235     : m_jit(0)
1236     , m_gpr(InvalidGPRReg)
1237 {
1238 }
1239
1240 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1241     : m_jit(jit)
1242     , m_gpr(InvalidGPRReg)
1243 {
1244     m_gpr = m_jit->allocate();
1245 }
1246
1247 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1248     : m_jit(jit)
1249     , m_gpr(InvalidGPRReg)
1250 {
1251     m_gpr = m_jit->allocate(specific);
1252 }
1253
1254 #if USE(JSVALUE32_64)
1255 GPRTemporary::GPRTemporary(
1256     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1257     : m_jit(jit)
1258     , m_gpr(InvalidGPRReg)
1259 {
1260     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1261         m_gpr = m_jit->reuse(op1.gpr(which));
1262     else
1263         m_gpr = m_jit->allocate();
1264 }
1265 #endif // USE(JSVALUE32_64)
1266
1267 JSValueRegsTemporary::JSValueRegsTemporary() { }
1268
1269 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1270 #if USE(JSVALUE64)
1271     : m_gpr(jit)
1272 #else
1273     : m_payloadGPR(jit)
1274     , m_tagGPR(jit)
1275 #endif
1276 {
1277 }
1278
1279 #if USE(JSVALUE64)
1280 template<typename T>
1281 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1282     : m_gpr(jit, Reuse, operand)
1283 {
1284 }
1285 #else
1286 template<typename T>
1287 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1288 {
1289     if (resultWord == PayloadWord) {
1290         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1291         m_tagGPR = GPRTemporary(jit);
1292     } else {
1293         m_payloadGPR = GPRTemporary(jit);
1294         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1295     }
1296 }
1297 #endif
1298
1299 #if USE(JSVALUE64)
1300 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1301 {
1302     m_gpr = GPRTemporary(jit, Reuse, operand);
1303 }
1304 #else
1305 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1306 {
1307     if (jit->canReuse(operand.node())) {
1308         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1309         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1310     } else {
1311         m_payloadGPR = GPRTemporary(jit);
1312         m_tagGPR = GPRTemporary(jit);
1313     }
1314 }
1315 #endif
1316
1317 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1318
1319 JSValueRegs JSValueRegsTemporary::regs()
1320 {
1321 #if USE(JSVALUE64)
1322     return JSValueRegs(m_gpr.gpr());
1323 #else
1324     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1325 #endif
1326 }
1327
1328 void GPRTemporary::adopt(GPRTemporary& other)
1329 {
1330     ASSERT(!m_jit);
1331     ASSERT(m_gpr == InvalidGPRReg);
1332     ASSERT(other.m_jit);
1333     ASSERT(other.m_gpr != InvalidGPRReg);
1334     m_jit = other.m_jit;
1335     m_gpr = other.m_gpr;
1336     other.m_jit = 0;
1337     other.m_gpr = InvalidGPRReg;
1338 }
1339
1340 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1341 {
1342     ASSERT(other.m_jit);
1343     ASSERT(other.m_fpr != InvalidFPRReg);
1344     m_jit = other.m_jit;
1345     m_fpr = other.m_fpr;
1346
1347     other.m_jit = nullptr;
1348 }
1349
1350 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1351     : m_jit(jit)
1352     , m_fpr(InvalidFPRReg)
1353 {
1354     m_fpr = m_jit->fprAllocate();
1355 }
1356
1357 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1358     : m_jit(jit)
1359     , m_fpr(InvalidFPRReg)
1360 {
1361     if (m_jit->canReuse(op1.node()))
1362         m_fpr = m_jit->reuse(op1.fpr());
1363     else
1364         m_fpr = m_jit->fprAllocate();
1365 }
1366
1367 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1368     : m_jit(jit)
1369     , m_fpr(InvalidFPRReg)
1370 {
1371     if (m_jit->canReuse(op1.node()))
1372         m_fpr = m_jit->reuse(op1.fpr());
1373     else if (m_jit->canReuse(op2.node()))
1374         m_fpr = m_jit->reuse(op2.fpr());
1375     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1376         m_fpr = m_jit->reuse(op1.fpr());
1377     else
1378         m_fpr = m_jit->fprAllocate();
1379 }
1380
1381 #if USE(JSVALUE32_64)
1382 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1383     : m_jit(jit)
1384     , m_fpr(InvalidFPRReg)
1385 {
1386     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1387         m_fpr = m_jit->reuse(op1.fpr());
1388     else
1389         m_fpr = m_jit->fprAllocate();
1390 }
1391 #endif
1392
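// The compilePeepHole* helpers emit the fused compare-and-branch forms used when a compare node
// feeds a Branch in the same block. If the taken block is the fall-through successor, the
// condition is inverted and the targets swapped so that the not-taken path can fall through.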
1393 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1394 {
1395     BasicBlock* taken = branchNode->branchData()->taken.block;
1396     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1397
1398     if (taken == nextBlock()) {
1399         condition = MacroAssembler::invert(condition);
1400         std::swap(taken, notTaken);
1401     }
1402
1403     SpeculateDoubleOperand op1(this, node->child1());
1404     SpeculateDoubleOperand op2(this, node->child2());
1405     
1406     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1407     jump(notTaken);
1408 }
1409
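// Object equality can compare cell pointers directly, but only after dealing with objects that
// masquerade as undefined: when the masquerades-as-undefined watchpoint is no longer valid,
// extra type-info flag checks are emitted for both operands.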
1410 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1411 {
1412     BasicBlock* taken = branchNode->branchData()->taken.block;
1413     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1414
1415     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1416     
1417     if (taken == nextBlock()) {
1418         condition = MacroAssembler::NotEqual;
1419         BasicBlock* tmp = taken;
1420         taken = notTaken;
1421         notTaken = tmp;
1422     }
1423
1424     SpeculateCellOperand op1(this, node->child1());
1425     SpeculateCellOperand op2(this, node->child2());
1426     
1427     GPRReg op1GPR = op1.gpr();
1428     GPRReg op2GPR = op2.gpr();
1429     
1430     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1431         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1432             speculationCheck(
1433                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1434         }
1435         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1436             speculationCheck(
1437                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1438         }
1439     } else {
1440         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1441             speculationCheck(
1442                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1443                 m_jit.branchIfNotObject(op1GPR));
1444         }
1445         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1446             m_jit.branchTest8(
1447                 MacroAssembler::NonZero, 
1448                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1449                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1450
1451         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1452             speculationCheck(
1453                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1454                 m_jit.branchIfNotObject(op2GPR));
1455         }
1456         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1457             m_jit.branchTest8(
1458                 MacroAssembler::NonZero, 
1459                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1460                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1461     }
1462
1463     branchPtr(condition, op1GPR, op2GPR, taken);
1464     jump(notTaken);
1465 }
1466
1467 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1468 {
1469     BasicBlock* taken = branchNode->branchData()->taken.block;
1470     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1471
1472     // The branch instruction will branch to the taken block.
1473     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1474     if (taken == nextBlock()) {
1475         condition = JITCompiler::invert(condition);
1476         std::swap(taken, notTaken);
1479     }
1480
1481     if (node->child1()->isInt32Constant()) {
1482         int32_t imm = node->child1()->asInt32();
1483         SpeculateBooleanOperand op2(this, node->child2());
1484         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1485     } else if (node->child2()->isInt32Constant()) {
1486         SpeculateBooleanOperand op1(this, node->child1());
1487         int32_t imm = node->child2()->asInt32();
1488         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1489     } else {
1490         SpeculateBooleanOperand op1(this, node->child1());
1491         SpeculateBooleanOperand op2(this, node->child2());
1492         branch32(condition, op1.gpr(), op2.gpr(), taken);
1493     }
1494
1495     jump(notTaken);
1496 }
1497
1498 void SpeculativeJIT::compileToLowerCase(Node* node)
1499 {
1500     ASSERT(node->op() == ToLowerCase);
1501     SpeculateCellOperand string(this, node->child1());
1502     GPRTemporary temp(this);
1503     GPRTemporary index(this);
1504     GPRTemporary charReg(this);
1505     GPRTemporary length(this);
1506
1507     GPRReg stringGPR = string.gpr();
1508     GPRReg tempGPR = temp.gpr();
1509     GPRReg indexGPR = index.gpr();
1510     GPRReg charGPR = charReg.gpr();
1511     GPRReg lengthGPR = length.gpr();
1512
1513     speculateString(node->child1(), stringGPR);
1514
1515     CCallHelpers::JumpList slowPath;
1516
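    // Fast path: scan a non-rope, 8-bit string for any non-ASCII character or uppercase
    // ASCII letter. If none is found the string is already lower case and is returned
    // unchanged; otherwise we call operationToLowerCase, passing the index at which the
    // scan stopped.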
1517     m_jit.move(TrustedImmPtr(0), indexGPR);
1518
1519     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1520     slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
1521
1522     slowPath.append(m_jit.branchTest32(
1523         MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1524         MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1525     m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1526     m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1527
1528     auto loopStart = m_jit.label();
1529     auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1530     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1531     slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1532     m_jit.sub32(TrustedImm32('A'), charGPR);
1533     slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1534
1535     m_jit.add32(TrustedImm32(1), indexGPR);
1536     m_jit.jump().linkTo(loopStart, &m_jit);
1537     
1538     slowPath.link(&m_jit);
1539     silentSpillAllRegisters(lengthGPR);
1540     callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1541     silentFillAllRegisters(lengthGPR);
1542     m_jit.exceptionCheck();
1543     auto done = m_jit.jump();
1544
1545     loopDone.link(&m_jit);
1546     m_jit.move(stringGPR, lengthGPR);
1547
1548     done.link(&m_jit);
1549     cellResult(lengthGPR, node);
1550 }
1551
1552 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1553 {
1554     BasicBlock* taken = branchNode->branchData()->taken.block;
1555     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1556
1557     // The branch instruction will branch to the taken block.
1558     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1559     if (taken == nextBlock()) {
1560         condition = JITCompiler::invert(condition);
1561         std::swap(taken, notTaken);
1564     }
1565
1566     if (node->child1()->isInt32Constant()) {
1567         int32_t imm = node->child1()->asInt32();
1568         SpeculateInt32Operand op2(this, node->child2());
1569         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1570     } else if (node->child2()->isInt32Constant()) {
1571         SpeculateInt32Operand op1(this, node->child1());
1572         int32_t imm = node->child2()->asInt32();
1573         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1574     } else {
1575         SpeculateInt32Operand op1(this, node->child1());
1576         SpeculateInt32Operand op2(this, node->child2());
1577         branch32(condition, op1.gpr(), op2.gpr(), taken);
1578     }
1579
1580     jump(notTaken);
1581 }
1582
1583 // Returns true if the compare is fused with a subsequent branch.
1584 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1585 {
1586     // Fused compare & branch.
1587     unsigned branchIndexInBlock = detectPeepHoleBranch();
1588     if (branchIndexInBlock != UINT_MAX) {
1589         Node* branchNode = m_block->at(branchIndexInBlock);
1590
1591         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1592         // so there can be no intervening nodes that also reference the compare.
1593         ASSERT(node->adjustedRefCount() == 1);
1594
1595         if (node->isBinaryUseKind(Int32Use))
1596             compilePeepHoleInt32Branch(node, branchNode, condition);
1597 #if USE(JSVALUE64)
1598         else if (node->isBinaryUseKind(Int52RepUse))
1599             compilePeepHoleInt52Branch(node, branchNode, condition);
1600 #endif // USE(JSVALUE64)
1601         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1602             // Use non-peephole comparison, for now.
1603             return false;
1604         } else if (node->isBinaryUseKind(DoubleRepUse))
1605             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1606         else if (node->op() == CompareEq) {
1607             if (node->isBinaryUseKind(BooleanUse))
1608                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1609             else if (node->isBinaryUseKind(SymbolUse))
1610                 compilePeepHoleSymbolEquality(node, branchNode);
1611             else if (node->isBinaryUseKind(ObjectUse))
1612                 compilePeepHoleObjectEquality(node, branchNode);
1613             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1614                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1615             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1616                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1617             else if (!needsTypeCheck(node->child1(), SpecOther))
1618                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1619             else if (!needsTypeCheck(node->child2(), SpecOther))
1620                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1621             else {
1622                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1623                 return true;
1624             }
1625         } else {
1626             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1627             return true;
1628         }
1629
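        // The compare node is consumed by the fused branch: mark its children as used and
        // advance the main loop to the branch node, so the loop skips over it now that its
        // code has already been emitted here.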
1630         use(node->child1());
1631         use(node->child2());
1632         m_indexInBlock = branchIndexInBlock;
1633         m_currentNode = branchNode;
1634         return true;
1635     }
1636     return false;
1637 }
1638
1639 void SpeculativeJIT::noticeOSRBirth(Node* node)
1640 {
1641     if (!node->hasVirtualRegister())
1642         return;
1643     
1644     VirtualRegister virtualRegister = node->virtualRegister();
1645     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1646     
1647     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1648 }
1649
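// A MovHint records, for OSR exit, that the given bytecode local now holds the child's
// value; we note the child's OSR birth and append the hint to the variable event stream.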
1650 void SpeculativeJIT::compileMovHint(Node* node)
1651 {
1652     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1653     
1654     Node* child = node->child1().node();
1655     noticeOSRBirth(child);
1656     
1657     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1658 }
1659
1660 void SpeculativeJIT::bail(AbortReason reason)
1661 {
1662     if (verboseCompilationEnabled())
1663         dataLog("Bailing compilation.\n");
1664     m_compileOkay = true;
1665     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1666     clearGenerationInfo();
1667 }
1668
1669 void SpeculativeJIT::compileCurrentBlock()
1670 {
1671     ASSERT(m_compileOkay);
1672     
1673     if (!m_block)
1674         return;
1675     
1676     ASSERT(m_block->isReachable);
1677     
1678     m_jit.blockHeads()[m_block->index] = m_jit.label();
1679
1680     if (!m_block->intersectionOfCFAHasVisited) {
1681         // Don't generate code for basic blocks that are unreachable according to CFA.
1682         // But to be sure that nobody has generated a jump to this block, drop in a
1683         // breakpoint here.
1684         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1685         return;
1686     }
1687
1688     m_stream->appendAndLog(VariableEvent::reset());
1689     
1690     m_jit.jitAssertHasValidCallFrame();
1691     m_jit.jitAssertTagsInPlace();
1692     m_jit.jitAssertArgumentCountSane();
1693
1694     m_state.reset();
1695     m_state.beginBasicBlock(m_block);
1696     
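    // Report to the variable event stream where and in what format each variable live at
    // the head of the block has been flushed, so OSR exit can recover it. Dead SetLocals
    // are skipped below.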
1697     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1698         int operand = m_block->variablesAtHead.operandForIndex(i);
1699         Node* node = m_block->variablesAtHead[i];
1700         if (!node)
1701             continue; // No need to record dead SetLocal's.
1702         
1703         VariableAccessData* variable = node->variableAccessData();
1704         DataFormat format;
1705         if (!node->refCount())
1706             continue; // No need to record dead SetLocal's.
1707         format = dataFormatFor(variable->flushFormat());
1708         m_stream->appendAndLog(
1709             VariableEvent::setLocal(
1710                 VirtualRegister(operand),
1711                 variable->machineLocal(),
1712                 format));
1713     }
1714
1715     m_origin = NodeOrigin();
1716     
1717     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1718         m_currentNode = m_block->at(m_indexInBlock);
1719         
1720         // We may have hit a contradiction that the CFA was aware of but that the JIT
1721         // didn't cause directly.
1722         if (!m_state.isValid()) {
1723             bail(DFGBailedAtTopOfBlock);
1724             return;
1725         }
1726
1727         m_interpreter.startExecuting();
1728         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1729         m_jit.setForNode(m_currentNode);
1730         m_origin = m_currentNode->origin;
1731         if (validationEnabled())
1732             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1733         m_lastGeneratedNode = m_currentNode->op();
1734         
1735         ASSERT(m_currentNode->shouldGenerate());
1736         
1737         if (verboseCompilationEnabled()) {
1738             dataLogF(
1739                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1740                 (int)m_currentNode->index(),
1741                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1742             dataLog("\n");
1743         }
1744
1745         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1746             m_jit.jitReleaseAssertNoException();
1747
1748         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1749
1750         compile(m_currentNode);
1751         
1752         if (belongsInMinifiedGraph(m_currentNode->op()))
1753             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1754         
1755 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1756         m_jit.clearRegisterAllocationOffsets();
1757 #endif
1758         
1759         if (!m_compileOkay) {
1760             bail(DFGBailedAtEndOfNode);
1761             return;
1762         }
1763         
1764         // Make sure that the abstract state is rematerialized for the next node.
1765         m_interpreter.executeEffects(m_indexInBlock);
1766     }
1767     
1768     // Perform the most basic verification that children have been used correctly.
1769     if (!ASSERT_DISABLED) {
1770         for (auto& info : m_generationInfo)
1771             RELEASE_ASSERT(!info.alive());
1772     }
1773 }
1774
1775 // If we are making type predictions about our arguments then
1776 // we need to check that they are correct on function entry.
1777 void SpeculativeJIT::checkArgumentTypes()
1778 {
1779     ASSERT(!m_currentNode);
1780     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1781
1782     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1783         Node* node = m_jit.graph().m_arguments[i];
1784         if (!node) {
1785             // The argument is dead. We don't do any checks for such arguments.
1786             continue;
1787         }
1788         
1789         ASSERT(node->op() == SetArgument);
1790         ASSERT(node->shouldGenerate());
1791
1792         VariableAccessData* variableAccessData = node->variableAccessData();
1793         FlushFormat format = variableAccessData->flushFormat();
1794         
1795         if (format == FlushedJSValue)
1796             continue;
1797         
1798         VirtualRegister virtualRegister = variableAccessData->local();
1799
1800         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1801         
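        // Emit a cheap tag check matching the flush format: on 64-bit we test against the
        // tag-type-number / tag-mask registers (and the ValueFalse pattern for booleans);
        // on 32-bit we compare the tag word against the expected tag.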
1802 #if USE(JSVALUE64)
1803         switch (format) {
1804         case FlushedInt32: {
1805             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1806             break;
1807         }
1808         case FlushedBoolean: {
1809             GPRTemporary temp(this);
1810             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1811             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1812             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1813             break;
1814         }
1815         case FlushedCell: {
1816             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1817             break;
1818         }
1819         default:
1820             RELEASE_ASSERT_NOT_REACHED();
1821             break;
1822         }
1823 #else
1824         switch (format) {
1825         case FlushedInt32: {
1826             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1827             break;
1828         }
1829         case FlushedBoolean: {
1830             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1831             break;
1832         }
1833         case FlushedCell: {
1834             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1835             break;
1836         }
1837         default:
1838             RELEASE_ASSERT_NOT_REACHED();
1839             break;
1840         }
1841 #endif
1842     }
1843
1844     m_origin = NodeOrigin();
1845 }
1846
1847 bool SpeculativeJIT::compile()
1848 {
1849     checkArgumentTypes();
1850     
1851     ASSERT(!m_currentNode);
1852     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1853         m_jit.setForBlockIndex(blockIndex);
1854         m_block = m_jit.graph().block(blockIndex);
1855         compileCurrentBlock();
1856     }
1857     linkBranches();
1858     return true;
1859 }
1860
1861 void SpeculativeJIT::createOSREntries()
1862 {
1863     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1864         BasicBlock* block = m_jit.graph().block(blockIndex);
1865         if (!block)
1866             continue;
1867         if (!block->isOSRTarget)
1868             continue;
1869         
1870         // Currently we don't have OSR entry trampolines. We could add them
1871         // here if need be.
1872         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1873     }
1874 }
1875
1876 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1877 {
1878     unsigned osrEntryIndex = 0;
1879     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1880         BasicBlock* block = m_jit.graph().block(blockIndex);
1881         if (!block)
1882             continue;
1883         if (!block->isOSRTarget)
1884             continue;
1885         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1886     }
1887     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1888     
1889     if (verboseCompilationEnabled()) {
1890         DumpContext dumpContext;
1891         dataLog("OSR Entries:\n");
1892         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1893             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1894         if (!dumpContext.isEmpty())
1895             dumpContext.dump(WTF::dataFile());
1896     }
1897 }
1898     
1899 void SpeculativeJIT::compileCheckTraps(Node*)
1900 {
1901     GPRTemporary unused(this);
1902     GPRReg unusedGPR = unused.gpr();
1903
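    // Poll the VM's "need trap handling" flag; when some agent (for example the watchdog,
    // via the VM traps mechanism) has set it, take the slow path and call
    // operationHandleTraps to service the trap.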
1904     JITCompiler::Jump needTrapHandling = m_jit.branchTest8(JITCompiler::NonZero,
1905         JITCompiler::AbsoluteAddress(m_jit.vm()->needTrapHandlingAddress()));
1906
1907     addSlowPathGenerator(slowPathCall(needTrapHandling, this, operationHandleTraps, unusedGPR));
1908 }
1909
1910 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1911 {
1912     Edge child3 = m_jit.graph().varArgChild(node, 2);
1913     Edge child4 = m_jit.graph().varArgChild(node, 3);
1914
1915     ArrayMode arrayMode = node->arrayMode();
1916     
1917     GPRReg baseReg = base.gpr();
1918     GPRReg propertyReg = property.gpr();
1919     
1920     SpeculateDoubleOperand value(this, child3);
1921
1922     FPRReg valueReg = value.fpr();
1923     
1924     DFG_TYPE_CHECK(
1925         JSValueRegs(), child3, SpecFullRealNumber,
1926         m_jit.branchDouble(
1927             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1928     
1929     if (!m_compileOkay)
1930         return;
1931     
1932     StorageOperand storage(this, child4);
1933     GPRReg storageReg = storage.gpr();
1934
1935     if (node->op() == PutByValAlias) {
1936         // Store the value to the array.
1937         GPRReg propertyReg = property.gpr();
1938         FPRReg valueReg = value.fpr();
1939         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1940         
1941         noResult(m_currentNode);
1942         return;
1943     }
1944     
1945     GPRTemporary temporary;
1946     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1947
1948     MacroAssembler::Jump slowCase;
1949     
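    // In-bounds stores only need a bounds check against the public length. Otherwise we
    // check against the vector length: a store into the slack between public length and
    // vector length bumps the public length, while anything beyond the vector length goes
    // to the out-of-bounds slow path (or exits if out-of-bounds stores were not expected).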
1950     if (arrayMode.isInBounds()) {
1951         speculationCheck(
1952             OutOfBounds, JSValueRegs(), 0,
1953             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1954     } else {
1955         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1956         
1957         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1958         
1959         if (!arrayMode.isOutOfBounds())
1960             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1961         
1962         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1963         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1964         
1965         inBounds.link(&m_jit);
1966     }
1967     
1968     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1969
1970     base.use();
1971     property.use();
1972     value.use();
1973     storage.use();
1974     
1975     if (arrayMode.isOutOfBounds()) {
1976         addSlowPathGenerator(
1977             slowPathCall(
1978                 slowCase, this,
1979                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1980                 NoResult, baseReg, propertyReg, valueReg));
1981     }
1982
1983     noResult(m_currentNode, UseChildrenCalledExplicitly);
1984 }
1985
1986 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1987 {
1988     SpeculateCellOperand string(this, node->child1());
1989     SpeculateStrictInt32Operand index(this, node->child2());
1990     StorageOperand storage(this, node->child3());
1991
1992     GPRReg stringReg = string.gpr();
1993     GPRReg indexReg = index.gpr();
1994     GPRReg storageReg = storage.gpr();
1995     
1996     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1997
1998     // unsigned comparison so we can filter out negative indices and indices that are too large
1999     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2000
2001     GPRTemporary scratch(this);
2002     GPRReg scratchReg = scratch.gpr();
2003
2004     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2005
2006     // Load the character into scratchReg
2007     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2008
2009     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2010     JITCompiler::Jump cont8Bit = m_jit.jump();
2011
2012     is16Bit.link(&m_jit);
2013
2014     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2015
2016     cont8Bit.link(&m_jit);
2017
2018     int32Result(scratchReg, m_currentNode);
2019 }
2020
2021 void SpeculativeJIT::compileGetByValOnString(Node* node)
2022 {
2023     SpeculateCellOperand base(this, node->child1());
2024     SpeculateStrictInt32Operand property(this, node->child2());
2025     StorageOperand storage(this, node->child3());
2026     GPRReg baseReg = base.gpr();
2027     GPRReg propertyReg = property.gpr();
2028     GPRReg storageReg = storage.gpr();
2029
2030     GPRTemporary scratch(this);
2031     GPRReg scratchReg = scratch.gpr();
2032 #if USE(JSVALUE32_64)
2033     GPRTemporary resultTag;
2034     GPRReg resultTagReg = InvalidGPRReg;
2035     if (node->arrayMode().isOutOfBounds()) {
2036         GPRTemporary realResultTag(this);
2037         resultTag.adopt(realResultTag);
2038         resultTagReg = resultTag.gpr();
2039     }
2040 #endif
2041
2042     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2043
2044     // unsigned comparison so we can filter out negative indices and indices that are too large
2045     JITCompiler::Jump outOfBounds = m_jit.branch32(
2046         MacroAssembler::AboveOrEqual, propertyReg,
2047         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2048     if (node->arrayMode().isInBounds())
2049         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2050
2051     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2052
2053     // Load the character into scratchReg
2054     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2055
2056     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2057     JITCompiler::Jump cont8Bit = m_jit.jump();
2058
2059     is16Bit.link(&m_jit);
2060
2061     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2062
2063     JITCompiler::Jump bigCharacter =
2064         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2065
2066     // 8 bit string values don't need the isASCII check.
2067     cont8Bit.link(&m_jit);
2068
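    // Map the character code to the VM's pre-allocated single-character JSString: scale by
    // pointer size and index into smallStrings.singleCharacterStrings(). Codes >= 0x100
    // (only reachable from the 16-bit path) take the operationSingleCharacterString slow
    // path registered below.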
2069     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2070     m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2071     m_jit.loadPtr(scratchReg, scratchReg);
2072
2073     addSlowPathGenerator(
2074         slowPathCall(
2075             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2076
2077     if (node->arrayMode().isOutOfBounds()) {
2078 #if USE(JSVALUE32_64)
2079         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2080 #endif
2081
2082         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2083         bool prototypeChainIsSane = false;
2084         if (globalObject->stringPrototypeChainIsSane()) {
2085             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2086             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2087             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2088             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2089             // indexed properties either.
2090             // https://bugs.webkit.org/show_bug.cgi?id=144668
2091             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2092             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2093             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2094         }
2095         if (prototypeChainIsSane) {
2096             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2097             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2098             
2099 #if USE(JSVALUE64)
2100             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2101                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2102 #else
2103             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2104                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2105                 baseReg, propertyReg));
2106 #endif
2107         } else {
2108 #if USE(JSVALUE64)
2109             addSlowPathGenerator(
2110                 slowPathCall(
2111                     outOfBounds, this, operationGetByValStringInt,
2112                     scratchReg, baseReg, propertyReg));
2113 #else
2114             addSlowPathGenerator(
2115                 slowPathCall(
2116                     outOfBounds, this, operationGetByValStringInt,
2117                     JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2118 #endif
2119         }
2120         
2121 #if USE(JSVALUE64)
2122         jsValueResult(scratchReg, m_currentNode);
2123 #else
2124         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2125 #endif
2126     } else
2127         cellResult(scratchReg, m_currentNode);
2128 }
2129
2130 void SpeculativeJIT::compileFromCharCode(Node* node)
2131 {
2132     Edge& child = node->child1();
2133     if (child.useKind() == UntypedUse) {
2134         JSValueOperand opr(this, child);
2135         JSValueRegs oprRegs = opr.jsValueRegs();
2136 #if USE(JSVALUE64)
2137         GPRTemporary result(this);
2138         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2139 #else
2140         GPRTemporary resultTag(this);
2141         GPRTemporary resultPayload(this);
2142         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2143 #endif
2144         flushRegisters();
2145         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2146         m_jit.exceptionCheck();
2147         
2148         jsValueResult(resultRegs, node);
2149         return;
2150     }
2151
2152     SpeculateStrictInt32Operand property(this, child);
2153     GPRReg propertyReg = property.gpr();
2154     GPRTemporary smallStrings(this);
2155     GPRTemporary scratch(this);
2156     GPRReg scratchReg = scratch.gpr();
2157     GPRReg smallStringsReg = smallStrings.gpr();
2158
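    // Fast path: character codes below 0xff index directly into the VM's single-character
    // string table. Codes that are too large, or table entries that have not been
    // materialized yet (null), fall back to operationStringFromCharCode.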
2159     JITCompiler::JumpList slowCases;
2160     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2161     m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2162     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2163
2164     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2165     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2166     cellResult(scratchReg, m_currentNode);
2167 }
2168
2169 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2170 {
2171     VirtualRegister virtualRegister = node->virtualRegister();
2172     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2173
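    // Classify the operand's current register format so ValueToInt32 can pick the cheapest
    // conversion: already-int formats convert directly, boxed formats go through the
    // JSValue path, and boolean/cell formats mean the speculation has already failed.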
2174     switch (info.registerFormat()) {
2175     case DataFormatStorage:
2176         RELEASE_ASSERT_NOT_REACHED();
2177
2178     case DataFormatBoolean:
2179     case DataFormatCell:
2180         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2181         return GeneratedOperandTypeUnknown;
2182
2183     case DataFormatNone:
2184     case DataFormatJSCell:
2185     case DataFormatJS:
2186     case DataFormatJSBoolean:
2187     case DataFormatJSDouble:
2188         return GeneratedOperandJSValue;
2189
2190     case DataFormatJSInt32:
2191     case DataFormatInt32:
2192         return GeneratedOperandInteger;
2193
2194     default:
2195         RELEASE_ASSERT_NOT_REACHED();
2196         return GeneratedOperandTypeUnknown;
2197     }
2198 }
2199
2200 void SpeculativeJIT::compileValueToInt32(Node* node)
2201 {
2202     switch (node->child1().useKind()) {
2203 #if USE(JSVALUE64)
2204     case Int52RepUse: {
2205         SpeculateStrictInt52Operand op1(this, node->child1());
2206         GPRTemporary result(this, Reuse, op1);
2207         GPRReg op1GPR = op1.gpr();
2208         GPRReg resultGPR = result.gpr();
2209         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2210         int32Result(resultGPR, node, DataFormatInt32);
2211         return;
2212     }
2213 #endif // USE(JSVALUE64)
2214         
2215     case DoubleRepUse: {
2216         GPRTemporary result(this);
2217         SpeculateDoubleOperand op1(this, node->child1());
2218         FPRReg fpr = op1.fpr();
2219         GPRReg gpr = result.gpr();
2220         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2221         
2222         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this,
2223             hasSensibleDoubleToInt() ? operationToInt32SensibleSlow : operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2224         
2225         int32Result(gpr, node);
2226         return;
2227     }
2228     
2229     case NumberUse:
2230     case NotCellUse: {
2231         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2232         case GeneratedOperandInteger: {
2233             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2234             GPRTemporary result(this, Reuse, op1);
2235             m_jit.move(op1.gpr(), result.gpr());
2236             int32Result(result.gpr(), node, op1.format());
2237             return;
2238         }
2239         case GeneratedOperandJSValue: {
2240             GPRTemporary result(this);
2241 #if USE(JSVALUE64)
2242             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2243
2244             GPRReg gpr = op1.gpr();
2245             GPRReg resultGpr = result.gpr();
2246             FPRTemporary tempFpr(this);
2247             FPRReg fpr = tempFpr.fpr();
2248
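            // On 64-bit, boxed Int32s compare unsigned above-or-equal to the tag-type-number
            // register and take the direct zero-extend path; boxed doubles are unboxed after
            // the use-kind checks below and converted via operationToInt32.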
2249             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2250             JITCompiler::JumpList converted;
2251
2252             if (node->child1().useKind() == NumberUse) {
2253                 DFG_TYPE_CHECK(
2254                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2255                     m_jit.branchTest64(
2256                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2257             } else {
2258                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2259                 
2260                 DFG_TYPE_CHECK(
2261                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2262                 
2263                 // It's not a cell: so true turns into 1 and all else turns into 0.
2264                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2265                 converted.append(m_jit.jump());
2266                 
2267                 isNumber.link(&m_jit);
2268             }
2269
2270             // If we get here, we have a double encoded as a JSValue, so unbox and convert it.
2271             unboxDouble(gpr, resultGpr, fpr);
2272
2273             silentSpillAllRegisters(resultGpr);
2274             callOperation(operationToInt32, resultGpr, fpr);
2275             silentFillAllRegisters(resultGpr);
2276
2277             converted.append(m_jit.jump());
2278
2279             isInteger.link(&m_jit);
2280             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2281
2282             converted.link(&m_jit);
2283 #else
2284             Node* childNode = node->child1().node();
2285             VirtualRegister virtualRegister = childNode->virtualRegister();
2286             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2287
2288             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2289
2290             GPRReg payloadGPR = op1.payloadGPR();
2291             GPRReg resultGpr = result.gpr();
2292         
2293             JITCompiler::JumpList converted;
2294
2295             if (info.registerFormat() == DataFormatJSInt32)
2296                 m_jit.move(payloadGPR, resultGpr);
2297             else {
2298                 GPRReg tagGPR = op1.tagGPR();
2299                 FPRTemporary tempFpr(this);
2300                 FPRReg fpr = tempFpr.fpr();
2301                 FPRTemporary scratch(this);
2302
2303                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2304
2305                 if (node->child1().useKind() == NumberUse) {
2306                     DFG_TYPE_CHECK(
2307                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2308                         m_jit.branch32(
2309                             MacroAssembler::AboveOrEqual, tagGPR,
2310                             TrustedImm32(JSValue::LowestTag)));
2311                 } else {
2312                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2313                     
2314                     DFG_TYPE_CHECK(
2315                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2316                         m_jit.branchIfCell(op1.jsValueRegs()));
2317                     
2318                     // It's not a cell: so true turns into 1 and all else turns into 0.
2319                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2320                     m_jit.move(TrustedImm32(0), resultGpr);
2321                     converted.append(m_jit.jump());
2322                     
2323                     isBoolean.link(&m_jit);
2324                     m_jit.move(payloadGPR, resultGpr);
2325                     converted.append(m_jit.jump());
2326                     
2327                     isNumber.link(&m_jit);
2328                 }
2329
2330                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2331
2332                 silentSpillAllRegisters(resultGpr);
2333                 callOperation(operationToInt32, resultGpr, fpr);
2334                 silentFillAllRegisters(resultGpr);
2335
2336                 converted.append(m_jit.jump());
2337
2338                 isInteger.link(&m_jit);
2339                 m_jit.move(payloadGPR, resultGpr);
2340
2341                 converted.link(&m_jit);
2342             }
2343 #endif
2344             int32Result(resultGpr, node);
2345             return;
2346         }
2347         case GeneratedOperandTypeUnknown:
2348             RELEASE_ASSERT(!m_compileOkay);
2349             return;
2350         }
2351         RELEASE_ASSERT_NOT_REACHED();
2352         return;
2353     }
2354     
2355     default:
2356         ASSERT(!m_compileOkay);
2357         return;
2358     }
2359 }
2360
2361 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2362 {
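    // A UInt32 with its high bit set does not fit in an Int32. If overflow is expected,
    // re-box it as an Int52 (when Int52 is available) or as a double, adding 2^32 to
    // negative inputs; otherwise speculate that the value is non-negative and exit when
    // the high bit is set.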
2363     if (doesOverflow(node->arithMode())) {
2364         if (enableInt52()) {
2365             SpeculateInt32Operand op1(this, node->child1());
2366             GPRTemporary result(this, Reuse, op1);
2367             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2368             strictInt52Result(result.gpr(), node);
2369             return;
2370         }
2371         SpeculateInt32Operand op1(this, node->child1());
2372         FPRTemporary result(this);
2373             
2374         GPRReg inputGPR = op1.gpr();
2375         FPRReg outputFPR = result.fpr();
2376             
2377         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2378             
2379         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2380         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2381         positive.link(&m_jit);
2382             
2383         doubleResult(outputFPR, node);
2384         return;
2385     }
2386     
2387     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2388
2389     SpeculateInt32Operand op1(this, node->child1());
2390     GPRTemporary result(this);
2391
2392     m_jit.move(op1.gpr(), result.gpr());
2393
2394     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2395
2396     int32Result(result.gpr(), node, op1.format());
2397 }
2398
2399 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2400 {
2401     SpeculateDoubleOperand op1(this, node->child1());
2402     FPRTemporary scratch(this);
2403     GPRTemporary result(this);
2404     
2405     FPRReg valueFPR = op1.fpr();
2406     FPRReg scratchFPR = scratch.fpr();
2407     GPRReg resultGPR = result.gpr();
2408
2409     JITCompiler::JumpList failureCases;
2410     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2411     m_jit.branchConvertDoubleToInt32(
2412         valueFPR, resultGPR, failureCases, scratchFPR,
2413         shouldCheckNegativeZero(node->arithMode()));
2414     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2415
2416     int32Result(resultGPR, node);
2417 }
2418
2419 void SpeculativeJIT::compileDoubleRep(Node* node)
2420 {
2421     switch (node->child1().useKind()) {
2422     case RealNumberUse: {
2423         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2424         FPRTemporary result(this);
2425         
2426         JSValueRegs op1Regs = op1.jsValueRegs();
2427         FPRReg resultFPR = result.fpr();
2428         
2429 #if USE(JSVALUE64)
2430         GPRTemporary temp(this);
2431         GPRReg tempGPR = temp.gpr();
2432         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2433 #else
2434         FPRTemporary temp(this);
2435         FPRReg tempFPR = temp.fpr();
2436         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2437 #endif
2438         
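        // A non-NaN result means the operand really was a boxed double, so we are done.
        // NaN here means the operand was either a genuine NaN (excluded by RealNumberUse)
        // or not a boxed double at all, so fall through to the Int32 check and conversion.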
2439         JITCompiler::Jump done = m_jit.branchDouble(
2440             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2441         
2442         DFG_TYPE_CHECK(
2443             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2444         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2445         
2446         done.link(&m_jit);
2447         
2448         doubleResult(resultFPR, node);
2449         return;
2450     }
2451     
2452     case NotCellUse:
2453     case NumberUse: {
2454         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2455
2456         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2457         if (isInt32Speculation(possibleTypes)) {
2458             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2459             FPRTemporary result(this);
2460             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2461             doubleResult(result.fpr(), node);
2462             return;
2463         }
2464
2465         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2466         FPRTemporary result(this);
2467
2468 #if USE(JSVALUE64)
2469         GPRTemporary temp(this);
2470
2471         GPRReg op1GPR = op1.gpr();
2472         GPRReg tempGPR = temp.gpr();
2473         FPRReg resultFPR = result.fpr();
2474         JITCompiler::JumpList done;
2475
2476         JITCompiler::Jump isInteger = m_jit.branch64(
2477             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2478
2479         if (node->child1().useKind() == NotCellUse) {
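            // NotCellUse: numbers skip ahead to the unboxing below; undefined becomes NaN,
            // null and false become 0, true becomes 1, and any remaining (cell) value exits.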
2480             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2481             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2482
2483             static const double zero = 0;
2484             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2485
2486             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2487             done.append(isNull);
2488
2489             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2490                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2491
2492             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2493             static const double one = 1;
2494             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2495             done.append(m_jit.jump());
2496             done.append(isFalse);
2497
2498             isUndefined.link(&m_jit);
2499             static const double NaN = PNaN;
2500             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2501             done.append(m_jit.jump());
2502
2503             isNumber.link(&m_jit);
2504         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2505             typeCheck(
2506                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2507                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2508         }
2509
2510         unboxDouble(op1GPR, tempGPR, resultFPR);
2511         done.append(m_jit.jump());
2512     
2513         isInteger.link(&m_jit);
2514         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2515         done.link(&m_jit);
2516 #else // USE(JSVALUE64) -> this is the 32_64 case
2517         FPRTemporary temp(this);
2518     
2519         GPRReg op1TagGPR = op1.tagGPR();
2520         GPRReg op1PayloadGPR = op1.payloadGPR();
2521         FPRReg tempFPR = temp.fpr();
2522         FPRReg resultFPR = result.fpr();
2523         JITCompiler::JumpList done;
2524     
2525         JITCompiler::Jump isInteger = m_jit.branch32(
2526             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2527
2528         if (node->child1().useKind() == NotCellUse) {
2529             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2530             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2531
2532             static const double zero = 0;
2533             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2534
2535             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2536             done.append(isNull);
2537
2538             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2539
2540             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2541             static const double one = 1;
2542             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2543             done.append(m_jit.jump());
2544             done.append(isFalse);
2545
2546             isUndefined.link(&m_jit);
2547             static const double NaN = PNaN;
2548             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2549             done.append(m_jit.jump());
2550
2551             isNumber.link(&m_jit);
2552         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2553             typeCheck(
2554                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2555                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2556         }
2557
2558         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2559         done.append(m_jit.jump());
2560     
2561         isInteger.link(&m_jit);
2562         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2563         done.link(&m_jit);
2564 #endif // USE(JSVALUE64)
2565     
2566         doubleResult(resultFPR, node);
2567         return;
2568     }
2569         
2570 #if USE(JSVALUE64)
2571     case Int52RepUse: {
2572         SpeculateStrictInt52Operand value(this, node->child1());
2573         FPRTemporary result(this);
2574         
2575         GPRReg valueGPR = value.gpr();
2576         FPRReg resultFPR = result.fpr();
2577
2578         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2579         
2580         doubleResult(resultFPR, node);
2581         return;
2582     }
2583 #endif // USE(JSVALUE64)
2584         
2585     default:
2586         RELEASE_ASSERT_NOT_REACHED();
2587         return;
2588     }
2589 }
2590
2591 void SpeculativeJIT::compileValueRep(Node* node)
2592 {
2593     switch (node->child1().useKind()) {
2594     case DoubleRepUse: {
2595         SpeculateDoubleOperand value(this, node->child1());
2596         JSValueRegsTemporary result(this);
2597         
2598         FPRReg valueFPR = value.fpr();
2599         JSValueRegs resultRegs = result.regs();
2600         
2601         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2602         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2603         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2604         // local was purified.
2605         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2606             m_jit.purifyNaN(valueFPR);
2607
2608         boxDouble(valueFPR, resultRegs);
2609         
2610         jsValueResult(resultRegs, node);
2611         return;
2612     }
2613         
2614 #if USE(JSVALUE64)
2615     case Int52RepUse: {
2616         SpeculateStrictInt52Operand value(this, node->child1());
2617         GPRTemporary result(this);
2618         
2619         GPRReg valueGPR = value.gpr();
2620         GPRReg resultGPR = result.gpr();
2621         
2622         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2623         
2624         jsValueResult(resultGPR, node);
2625         return;
2626     }
2627 #endif // USE(JSVALUE64)
2628         
2629     default:
2630         RELEASE_ASSERT_NOT_REACHED();
2631         return;
2632     }
2633 }
2634
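// Clamp helper for clamped (Uint8ClampedArray-style) stores: add 0.5 so the caller's
// truncation rounds, then clamp to [0, 255]. NaN falls into the !(d > 0) branch and
// becomes 0.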
2635 static double clampDoubleToByte(double d)
2636 {
2637     d += 0.5;
2638     if (!(d > 0))
2639         d = 0;
2640     else if (d > 255)
2641         d = 255;
2642     return d;
2643 }
2644
2645 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2646 {
2647     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2648     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2649     jit.xorPtr(result, result);
2650     MacroAssembler::Jump clamped = jit.jump();
2651     tooBig.link(&jit);
2652     jit.move(JITCompiler::TrustedImm32(255), result);
2653     clamped.link(&jit);
2654     inBounds.link(&jit);
2655 }
2656
2657 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2658 {
2659     // Unordered compare so we pick up NaN
2660     static const double zero = 0;
2661     static const double byteMax = 255;
2662     static const double half = 0.5;
2663     jit.loadDouble(JITCompiler::TrustedImmPtr(&zero), scratch);
2664     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2665     jit.loadDouble(JITCompiler::TrustedImmPtr(&byteMax), scratch);
2666     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2667     
2668     jit.loadDouble(JITCompiler::TrustedImmPtr(&half), scratch);
2669     // FIXME: This should probably just use a floating point round!
2670     // https://bugs.webkit.org/show_bug.cgi?id=72054
2671     jit.addDouble(source, scratch);
2672     jit.truncateDoubleToInt32(scratch, result);   
2673     MacroAssembler::Jump truncatedInt = jit.jump();
2674     
2675     tooSmall.link(&jit);
2676     jit.xorPtr(result, result);
2677     MacroAssembler::Jump zeroed = jit.jump();
2678     
2679     tooBig.link(&jit);
2680     jit.move(JITCompiler::TrustedImm32(255), result);
2681     
2682     truncatedInt.link(&jit);
2683     zeroed.link(&jit);
2684
2685 }
2686
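// Produces the bounds-check branch for a typed array access, or an unset Jump when the
// check can be elided: PutByValAlias has already been checked, and a constant index that
// is provably within a foldable view's length needs no dynamic check either.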
2687 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2688 {
2689     if (node->op() == PutByValAlias)
2690         return JITCompiler::Jump();
2691     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2692         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2693     if (view) {
2694         uint32_t length = view->length();
2695         Node* indexNode = m_jit.graph().child(node, 1).node();
2696         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2697             return JITCompiler::Jump();
2698         return m_jit.branch32(
2699             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2700     }
2701     return m_jit.branch32(
2702         MacroAssembler::AboveOrEqual, indexGPR,
2703         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2704 }
2705
2706 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2707 {
2708     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2709     if (!jump.isSet())
2710         return;
2711     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2712 }
2713
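// An out-of-bounds typed array access must distinguish a merely out-of-bounds index from a
// neutered buffer: only a wasteful view can end up with a null vector, and a null vector
// means the buffer was neutered, which must exit rather than be silently ignored.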
2714 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2715 {
2716     JITCompiler::Jump done;
2717     if (outOfBounds.isSet()) {
2718         done = m_jit.jump();
2719         if (node->arrayMode().isInBounds())
2720             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2721         else {
2722             outOfBounds.link(&m_jit);
2723
2724             JITCompiler::Jump notWasteful = m_jit.branch32(
2725                 MacroAssembler::NotEqual,
2726                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2727                 TrustedImm32(WastefulTypedArray));
2728
2729             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2730                 MacroAssembler::Zero,
2731                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2732             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2733             notWasteful.link(&m_jit);
2734         }
2735     }
2736     return done;
2737 }
2738
2739 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2740 {
2741     ASSERT(isInt(type));
2742     
2743     SpeculateCellOperand base(this, node->child1());
2744     SpeculateStrictInt32Operand property(this, node->child2());
2745     StorageOperand storage(this, node->child3());
2746
2747     GPRReg baseReg = base.gpr();
2748     GPRReg propertyReg = property.gpr();
2749     GPRReg storageReg = storage.gpr();
2750
2751     GPRTemporary result(this);
2752     GPRReg resultReg = result.gpr();
2753
2754     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2755
2756     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2757     switch (elementSize(type)) {
2758     case 1:
2759         if (isSigned(type))
2760             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2761         else
2762             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2763         break;
2764     case 2:
2765         if (isSigned(type))
2766             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2767         else
2768             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2769         break;
2770     case 4:
2771         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2772         break;
2773     default:
2774         CRASH();
2775     }
2776     if (elementSize(type) < 4 || isSigned(type)) {
2777         int32Result(resultReg, node);
2778         return;
2779     }
2780     
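    // Uint32 loads may not fit in an Int32. Prefer speculating Int32 (exiting when the sign
    // bit is set), then AnyInt via Int52 on 64-bit, and finally fall back to a double,
    // adding 2^32 to reinterpret negative results as unsigned.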
2781     ASSERT(elementSize(type) == 4 && !isSigned(type));
2782     if (node->shouldSpeculateInt32()) {
2783         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2784         int32Result(resultReg, node);
2785         return;
2786     }
2787     
2788 #if USE(JSVALUE64)
2789     if (node->shouldSpeculateAnyInt()) {
2790         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2791         strictInt52Result(resultReg, node);
2792         return;
2793     }
2794 #endif
2795     
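         // Remaining case: a Uint32Array load whose result is not known to fit in an int32. The
         // loaded bits are in resultReg and convertInt32ToDouble treats them as signed, so if the
         // signed view is negative we add 2^32 to recover the unsigned value. For example, stored
         // 0xFFFFFFFF converts to -1.0 and becomes 4294967295.0 after the addition.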
2796     FPRTemporary fresult(this);
2797     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2798     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2799     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2800     positive.link(&m_jit);
2801     doubleResult(fresult.fpr(), node);
2802 }
2803
2804 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2805 {
2806     ASSERT(isInt(type));
2807     
2808     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2809     GPRReg storageReg = storage.gpr();
2810     
2811     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2812     
2813     GPRTemporary value;
2814 #if USE(JSVALUE32_64)
2815     GPRTemporary propertyTag;
2816     GPRTemporary valueTag;
2817 #endif
2818
2819     GPRReg valueGPR = InvalidGPRReg;
2820 #if USE(JSVALUE32_64)
2821     GPRReg propertyTagGPR = InvalidGPRReg;
2822     GPRReg valueTagGPR = InvalidGPRReg;
2823 #endif
2824
2825     JITCompiler::JumpList slowPathCases;
2826     
2827     bool isAppropriateConstant = false;
2828     if (valueUse->isConstant()) {
2829         JSValue jsValue = valueUse->asJSValue();
2830         SpeculatedType expectedType = typeFilterFor(valueUse.useKind());
2831         SpeculatedType actualType = speculationFromValue(jsValue);
2832         isAppropriateConstant = (expectedType | actualType) == expectedType;
2833     }
2834
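         // A constant can be used directly only if its speculated type is subsumed by the type
         // filter of the value edge; (expected | actual) == expected is that subset test. Other
         // constants go through the regular per-use-kind paths below.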
2835     if (isAppropriateConstant) {
2836         JSValue jsValue = valueUse->asJSValue();
2837         if (!jsValue.isNumber()) {
2838             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2839             noResult(node);
2840             return;
2841         }
2842         double d = jsValue.asNumber();
2843         if (isClamped(type)) {
2844             ASSERT(elementSize(type) == 1);
2845             d = clampDoubleToByte(d);
2846         }
2847         GPRTemporary scratch(this);
2848         GPRReg scratchReg = scratch.gpr();
2849         m_jit.move(Imm32(toInt32(d)), scratchReg);
2850         value.adopt(scratch);
2851         valueGPR = scratchReg;
2852     } else {
2853         switch (valueUse.useKind()) {
2854         case Int32Use: {
2855             SpeculateInt32Operand valueOp(this, valueUse);
2856             GPRTemporary scratch(this);
2857             GPRReg scratchReg = scratch.gpr();
2858             m_jit.move(valueOp.gpr(), scratchReg);
2859             if (isClamped(type)) {
2860                 ASSERT(elementSize(type) == 1);
2861                 compileClampIntegerToByte(m_jit, scratchReg);
2862             }
2863             value.adopt(scratch);
2864             valueGPR = scratchReg;
2865             break;
2866         }
2867             
2868 #if USE(JSVALUE64)
2869         case Int52RepUse: {
2870             SpeculateStrictInt52Operand valueOp(this, valueUse);
2871             GPRTemporary scratch(this);
2872             GPRReg scratchReg = scratch.gpr();
2873             m_jit.move(valueOp.gpr(), scratchReg);
2874             if (isClamped(type)) {
2875                 ASSERT(elementSize(type) == 1);
2876                 MacroAssembler::Jump inBounds = m_jit.branch64(
2877                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2878                 MacroAssembler::Jump tooBig = m_jit.branch64(
2879                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2880                 m_jit.move(TrustedImm32(0), scratchReg);
2881                 MacroAssembler::Jump clamped = m_jit.jump();
2882                 tooBig.link(&m_jit);
2883                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2884                 clamped.link(&m_jit);
2885                 inBounds.link(&m_jit);
2886             }
2887             value.adopt(scratch);
2888             valueGPR = scratchReg;
2889             break;
2890         }
2891 #endif // USE(JSVALUE64)
2892             
2893         case DoubleRepUse: {
2894             if (isClamped(type)) {
2895                 ASSERT(elementSize(type) == 1);
2896                 SpeculateDoubleOperand valueOp(this, valueUse);
2897                 GPRTemporary result(this);
2898                 FPRTemporary floatScratch(this);
2899                 FPRReg fpr = valueOp.fpr();
2900                 GPRReg gpr = result.gpr();
2901                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2902                 value.adopt(result);
2903                 valueGPR = gpr;
2904             } else {
2905 #if USE(JSVALUE32_64)
2906                 GPRTemporary realPropertyTag(this);
2907                 propertyTag.adopt(realPropertyTag);
2908                 propertyTagGPR = propertyTag.gpr();
2909
2910                 GPRTemporary realValueTag(this);
2911                 valueTag.adopt(realValueTag);
2912                 valueTagGPR = valueTag.gpr();
2913 #endif
2914                 SpeculateDoubleOperand valueOp(this, valueUse);
2915                 GPRTemporary result(this);
2916                 FPRReg fpr = valueOp.fpr();
2917                 GPRReg gpr = result.gpr();
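                     // NaN stores as zero. A double that truncates to an int32 takes the fast
                     // path with the truncated value; anything else is boxed as a JSValue (on
                     // 64-bit the int32 property is also re-boxed via the tag-type-number
                     // register) and handed to the generic put-by-val slow path below.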
2918                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2919                 m_jit.xorPtr(gpr, gpr);
2920                 MacroAssembler::JumpList fixed(m_jit.jump());
2921                 notNaN.link(&m_jit);
2922
2923                 fixed.append(m_jit.branchTruncateDoubleToInt32(
2924                     fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
2925
2926 #if USE(JSVALUE64)
2927                 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
2928                 boxDouble(fpr, gpr);
2929 #else
2930                 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
2931                 boxDouble(fpr, valueTagGPR, gpr);
2932 #endif
2933                 slowPathCases.append(m_jit.jump());
2934
2935                 fixed.link(&m_jit);
2936                 value.adopt(result);
2937                 valueGPR = gpr;
2938             }
2939             break;
2940         }
2941             
2942         default:
2943             RELEASE_ASSERT_NOT_REACHED();
2944             break;
2945         }
2946     }
2947     
2948     ASSERT_UNUSED(valueGPR, valueGPR != property);
2949     ASSERT(valueGPR != base);
2950     ASSERT(valueGPR != storageReg);
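         // In-bounds indices perform the store and then jump over the out-of-bounds handling.
         // Out-of-bounds indices skip the store; they OSR exit if the array mode is in-bounds or
         // the view's buffer was neutered, and otherwise complete as a no-op.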
2951     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2952
2953     switch (elementSize(type)) {
2954     case 1:
2955         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2956         break;
2957     case 2:
2958         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2959         break;
2960     case 4:
2961         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2962         break;
2963     default:
2964         CRASH();
2965     }
2966
2967     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2968     if (done.isSet())
2969         done.link(&m_jit);
2970
2971     if (!slowPathCases.empty()) {
2972 #if USE(JSVALUE64)
2973         if (node->op() == PutByValDirect) {
2974             addSlowPathGenerator(slowPathCall(
2975                 slowPathCases, this,
2976                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
2977                 NoResult, base, property, valueGPR));
2978         } else {
2979             addSlowPathGenerator(slowPathCall(
2980                 slowPathCases, this,
2981                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
2982                 NoResult, base, property, valueGPR));
2983         }
2984 #else // not USE(JSVALUE64)
2985         if (node->op() == PutByValDirect) {
2986             addSlowPathGenerator(slowPathCall(
2987                 slowPathCases, this,
2988                 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
2989                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
2990         } else {
2991             addSlowPathGenerator(slowPathCall(
2992                 slowPathCases, this,
2993                 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
2994                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
2995         }
2996 #endif
2997     }
2998     noResult(node);
2999 }
3000
3001 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3002 {
3003     ASSERT(isFloat(type));
3004     
3005     SpeculateCellOperand base(this, node->child1());
3006     SpeculateStrictInt32Operand property(this, node->child2());
3007     StorageOperand storage(this, node->child3());
3008
3009     GPRReg baseReg = base.gpr();
3010     GPRReg propertyReg = property.gpr();
3011     GPRReg storageReg = storage.gpr();
3012
3013     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
3014
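         // Float32 elements are widened to double after the load because the DFG represents all
         // floating point values as doubles; Float64 elements are loaded directly.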
3015     FPRTemporary result(this);
3016     FPRReg resultReg = result.fpr();
3017     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3018     switch (elementSize(type)) {
3019     case 4:
3020         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3021         m_jit.convertFloatToDouble(resultReg, resultReg);
3022         break;
3023     case 8: {
3024         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3025         break;
3026     }
3027     default:
3028         RELEASE_ASSERT_NOT_REACHED();
3029     }
3030     
3031     doubleResult(resultReg, node);
3032 }
3033
3034 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3035 {
3036     ASSERT(isFloat(type));
3037     
3038     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3039     GPRReg storageReg = storage.gpr();
3040     
3041     Edge baseUse = m_jit.graph().varArgChild(node, 0);
3042     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3043
3044     SpeculateDoubleOperand valueOp(this, valueUse);
3045     FPRTemporary scratch(this);
3046     FPRReg valueFPR = valueOp.fpr();
3047     FPRReg scratchFPR = scratch.fpr();
3048
3049     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3050     
3051     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3052     
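         // For Float32 elements the incoming double is narrowed into the scratch FPR before the
         // store; Float64 elements are stored as-is.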
3053     switch (elementSize(type)) {
3054     case 4: {
3055         m_jit.moveDouble(valueFPR, scratchFPR);
3056         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3057         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3058         break;
3059     }
3060     case 8:
3061         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3062         break;
3063     default:
3064         RELEASE_ASSERT_NOT_REACHED();
3065     }
3066
3067     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3068     if (done.isSet())
3069         done.link(&m_jit);
3070     noResult(node);
3071 }
3072
3073 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
3074 {
3075     // Check that prototype is an object.
3076     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3077     
3078     // Initialize scratchReg with the value being checked.
3079     m_jit.move(valueReg, scratchReg);
3080     
3081     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
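         // Proxy objects cannot be walked directly (reading their prototype is observable), so
         // they bail out to operationDefaultHasInstance. The walk ends when the loaded prototype
         // is no longer a cell, i.e. null, which means there was no match.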
3082     MacroAssembler::Label loop(&m_jit);
3083     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3084         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3085     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
3086     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
3087     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3088 #if USE(JSVALUE64)
3089     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3090 #else
3091     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3092 #endif
3093     
3094     // No match - result is false.
3095 #if USE(JSVALUE64)
3096     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
3097 #else
3098     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
3099 #endif
3100     MacroAssembler::JumpList doneJumps; 
3101     doneJumps.append(m_jit.jump());
3102
3103     performDefaultHasInstance.link(&m_jit);
3104     silentSpillAllRegisters(scratchReg);
3105     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
3106     silentFillAllRegisters(scratchReg);
3107     m_jit.exceptionCheck();
3108 #if USE(JSVALUE64)
3109     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
3110 #endif
3111     doneJumps.append(m_jit.jump());
3112     
3113     isInstance.link(&m_jit);
3114 #if USE(JSVALUE64)
3115     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
3116 #else
3117     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
3118 #endif
3119     
3120     doneJumps.link(&m_jit);
3121 }
3122
3123 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
3124 {
3125     SpeculateCellOperand base(this, node->child1());
3126
3127     GPRReg baseGPR = base.gpr();
3128
3129     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
3130
3131     noResult(node);
3132 }
3133
3134 void SpeculativeJIT::compileParseInt(Node* node)
3135 {
3136     RELEASE_ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == StringUse);
3137
3138     GPRFlushedCallResult resultPayload(this);
3139     GPRReg resultPayloadGPR = resultPayload.gpr();
3140 #if USE(JSVALUE64)
3141     JSValueRegs resultRegs(resultPayloadGPR);
3142 #else
3143     GPRFlushedCallResult2 resultTag(this);
3144     GPRReg resultTagGPR = resultTag.gpr();
3145     JSValueRegs resultRegs(resultTagGPR, resultPayloadGPR);
3146 #endif
3147
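         // Four slow-path variants cover the combinations of radix-present / radix-absent and
         // UntypedUse / StringUse; all of them can throw, hence the exception check after each
         // call.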
3148     if (node->child2()) {
3149         SpeculateInt32Operand radix(this, node->child2());
3150         GPRReg radixGPR = radix.gpr();
3151         if (node->child1().useKind() == UntypedUse) {
3152             JSValueOperand value(this, node->child1());
3153
3154             flushRegisters();
3155 #if USE(JSVALUE64)
3156             callOperation(operationParseIntGeneric, resultRegs.gpr(), value.gpr(), radixGPR);
3157 #else
3158             callOperation(operationParseIntGeneric, resultRegs, value.jsValueRegs(), radixGPR);
3159 #endif
3160             m_jit.exceptionCheck();
3161         } else {
3162             SpeculateCellOperand value(this, node->child1());
3163             GPRReg valueGPR = value.gpr();
3164             speculateString(node->child1(), valueGPR);
3165
3166             flushRegisters();
3167 #if USE(JSVALUE64)
3168             callOperation(operationParseIntString, resultRegs.gpr(), valueGPR, radixGPR);
3169 #else
3170             callOperation(operationParseIntString, resultRegs, valueGPR, radixGPR);
3171 #endif
3172             m_jit.exceptionCheck();
3173         }
3174     } else {
3175         if (node->child1().useKind() == UntypedUse) {
3176             JSValueOperand value(this, node->child1());
3177
3178             flushRegisters();
3179 #if USE(JSVALUE64)
3180             callOperation(operationParseIntNoRadixGeneric, resultRegs.gpr(), value.jsValueRegs());
3181 #else
3182             callOperation(operationParseIntNoRadixGeneric, resultRegs, value.jsValueRegs());
3183 #endif
3184             m_jit.exceptionCheck();
3185         } else {
3186             SpeculateCellOperand value(this, node->child1());
3187             GPRReg valueGPR = value.gpr();
3188             speculateString(node->child1(), valueGPR);
3189
3190             flushRegisters();
3191             callOperation(operationParseIntStringNoRadix, resultRegs, valueGPR);
3192             m_jit.exceptionCheck();
3193         }
3194     }
3195
3196     jsValueResult(resultRegs, node);
3197 }
3198
3199 void SpeculativeJIT::compileInstanceOf(Node* node)
3200 {
3201     if (node->child1().useKind() == UntypedUse) {
3202         // It might not be a cell. Speculate less aggressively.
3203         // Or: it might only be used once (i.e. by us), so we get zero benefit
3204         // from speculating any more aggressively than we absolutely need to.
3205         
3206         JSValueOperand value(this, node->child1());
3207         SpeculateCellOperand prototype(this, node->child2());
3208         GPRTemporary scratch(this);
3209         GPRTemporary scratch2(this);
3210         
3211         GPRReg prototypeReg = prototype.gpr();
3212         GPRReg scratchReg = scratch.gpr();
3213         GPRReg scratch2Reg = scratch2.gpr();
3214         
3215         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
3216         GPRReg valueReg = value.jsValueRegs().payloadGPR();
3217         moveFalseTo(scratchReg);
3218
3219         MacroAssembler::Jump done = m_jit.jump();
3220         
3221         isCell.link(&m_jit);
3222         
3223         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3224         
3225         done.link(&m_jit);
3226
3227         blessedBooleanResult(scratchReg, node);
3228         return;
3229     }
3230     
3231     SpeculateCellOperand value(this, node->child1());
3232     SpeculateCellOperand prototype(this, node->child2());
3233     
3234     GPRTemporary scratch(this);
3235     GPRTemporary scratch2(this);
3236     
3237     GPRReg valueReg = value.gpr();
3238     GPRReg prototypeReg = prototype.gpr();
3239     GPRReg scratchReg = scratch.gpr();
3240     GPRReg scratch2Reg = scratch2.gpr();
3241     
3242     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3243
3244     blessedBooleanResult(scratchReg, node);
3245 }
3246
3247 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3248 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3249 {
3250     Edge& leftChild = node->child1();
3251     Edge& rightChild = node->child2();
3252
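         // If either operand is known not to be a number, the int32 fast path can never be taken,
         // so just flush and call the generic operation. Otherwise emit the snippet fast path and
         // fall back to the same operation on its slow-path jumps, materializing any constant
         // operand into the (freshly spilled) result registers first.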
3253     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3254         JSValueOperand left(this, leftChild);
3255         JSValueOperand right(this, rightChild);
3256         JSValueRegs leftRegs = left.jsValueRegs();
3257         JSValueRegs rightRegs = right.jsValueRegs();
3258 #if USE(JSVALUE64)
3259         GPRTemporary result(this);
3260         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3261 #else
3262         GPRTemporary resultTag(this);
3263         GPRTemporary resultPayload(this);
3264         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3265 #endif
3266         flushRegisters();
3267         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3268         m_jit.exceptionCheck();
3269
3270         jsValueResult(resultRegs, node);
3271         return;
3272     }
3273
3274     std::optional<JSValueOperand> left;
3275     std::optional<JSValueOperand> right;
3276
3277     JSValueRegs leftRegs;
3278     JSValueRegs rightRegs;
3279
3280 #if USE(JSVALUE64)
3281     GPRTemporary result(this);
3282     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3283     GPRTemporary scratch(this);
3284     GPRReg scratchGPR = scratch.gpr();
3285 #else
3286     GPRTemporary resultTag(this);
3287     GPRTemporary resultPayload(this);
3288     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3289     GPRReg scratchGPR = resultTag.gpr();
3290 #endif
3291
3292     SnippetOperand leftOperand;
3293     SnippetOperand rightOperand;
3294
3295     // The snippet generator does not support both operands being constant. If the left
3296     // operand is already const, we'll ignore the right operand's constness.
3297     if (leftChild->isInt32Constant())
3298         leftOperand.setConstInt32(leftChild->asInt32());
3299     else if (rightChild->isInt32Constant())
3300         rightOperand.setConstInt32(rightChild->asInt32());
3301
3302     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3303
3304     if (!leftOperand.isConst()) {
3305         left.emplace(this, leftChild);
3306         leftRegs = left->jsValueRegs();
3307     }
3308     if (!rightOperand.isConst()) {
3309         right.emplace(this, rightChild);
3310         rightRegs = right->jsValueRegs();
3311     }
3312
3313     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3314     gen.generateFastPath(m_jit);
3315
3316     ASSERT(gen.didEmitFastPath());
3317     gen.endJumpList().append(m_jit.jump());
3318
3319     gen.slowPathJumpList().link(&m_jit);
3320     silentSpillAllRegisters(resultRegs);
3321
3322     if (leftOperand.isConst()) {
3323         leftRegs = resultRegs;
3324         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3325     } else if (rightOperand.isConst()) {
3326         rightRegs = resultRegs;
3327         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3328     }
3329
3330     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3331
3332     silentFillAllRegisters(resultRegs);
3333     m_jit.exceptionCheck();
3334
3335     gen.endJumpList().link(&m_jit);
3336     jsValueResult(resultRegs, node);
3337 }
3338
3339 void SpeculativeJIT::compileBitwiseOp(Node* node)
3340 {
3341     NodeType op = node->op();
3342     Edge& leftChild = node->child1();
3343     Edge& rightChild = node->child2();
3344
3345     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3346         switch (op) {
3347         case BitAnd:
3348             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3349             return;
3350         case BitOr:
3351             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3352             return;
3353         case BitXor:
3354             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3355             return;
3356         default:
3357             RELEASE_ASSERT_NOT_REACHED();
3358         }
3359     }
3360
3361     if (leftChild->isInt32Constant()) {
3362         SpeculateInt32Operand op2(this, rightChild);
3363         GPRTemporary result(this, Reuse, op2);
3364
3365         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3366
3367         int32Result(result.gpr(), node);
3368
3369     } else if (rightChild->isInt32Constant()) {
3370         SpeculateInt32Operand op1(this, leftChild);
3371         GPRTemporary result(this, Reuse, op1);
3372
3373         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3374
3375         int32Result(result.gpr(), node);
3376
3377     } else {
3378         SpeculateInt32Operand op1(this, leftChild);
3379         SpeculateInt32Operand op2(this, rightChild);
3380         GPRTemporary result(this, Reuse, op1, op2);
3381         
3382         GPRReg reg1 = op1.gpr();
3383         GPRReg reg2 = op2.gpr();
3384         bitOp(op, reg1, reg2, result.gpr());
3385         
3386         int32Result(result.gpr(), node);
3387     }
3388 }
3389
3390 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3391 {
3392     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3393         ? operationValueBitRShift : operationValueBitURShift;
3394     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3395         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3396
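         // BitRShift is JavaScript's sign-preserving >> and BitURShift is the zero-filling >>>;
         // for example, (-8) >> 1 === -4 while (-8) >>> 1 === 2147483644.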
3397     Edge& leftChild = node->child1();
3398     Edge& rightChild = node->child2();
3399
3400     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3401         JSValueOperand left(this, leftChild);
3402         JSValueOperand right(this, rightChild);
3403         JSValueRegs leftRegs = left.jsValueRegs();
3404         JSValueRegs rightRegs = right.jsValueRegs();
3405 #if USE(JSVALUE64)
3406         GPRTemporary result(this);
3407         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3408 #else
3409         GPRTemporary resultTag(this);
3410         GPRTemporary resultPayload(this);
3411         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3412 #endif
3413         flushRegisters();
3414         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3415         m_jit.exceptionCheck();
3416
3417         jsValueResult(resultRegs, node);
3418         return;
3419     }
3420
3421     std::optional<JSValueOperand> left;
3422     std::optional<JSValueOperand> right;
3423
3424     JSValueRegs leftRegs;
3425     JSValueRegs rightRegs;
3426
3427     FPRTemporary leftNumber(this);
3428     FPRReg leftFPR = leftNumber.fpr();
3429
3430 #if USE(JSVALUE64)
3431     GPRTemporary result(this);
3432     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3433     GPRTemporary scratch(this);
3434     GPRReg scratchGPR = scratch.gpr();
3435     FPRReg scratchFPR = InvalidFPRReg;
3436 #else
3437     GPRTemporary resultTag(this);
3438     GPRTemporary resultPayload(this);
3439     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3440     GPRReg scratchGPR = resultTag.gpr();
3441     FPRTemporary fprScratch(this);
3442     FPRReg scratchFPR = fprScratch.fpr();
3443 #endif
3444
3445     SnippetOperand leftOperand;
3446     SnippetOperand rightOperand;
3447
3448     // The snippet generator does not support both operands being constant. If the left
3449     // operand is already const, we'll ignore the right operand's constness.
3450     if (leftChild->isInt32Constant())
3451         leftOperand.setConstInt32(leftChild->asInt32());
3452     else if (rightChild->isInt32Constant())
3453         rightOperand.setConstInt32(rightChild->asInt32());
3454
3455     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3456
3457     if (!leftOperand.isConst()) {
3458         left.emplace(this, leftChild);
3459         leftRegs = left->jsValueRegs();
3460     }
3461     if (!rightOperand.isConst()) {
3462         right.emplace(this, rightChild);
3463         rightRegs = right->jsValueRegs();
3464     }
3465
3466     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3467         leftFPR, scratchGPR, scratchFPR, shiftType);
3468     gen.generateFastPath(m_jit);
3469
3470     ASSERT(gen.didEmitFastPath());
3471     gen.endJumpList().append(m_jit.jump());
3472
3473     gen.slowPathJumpList().link(&m_jit);
3474     silentSpillAllRegisters(resultRegs);
3475
3476     if (leftOperand.isConst()) {
3477         leftRegs = resultRegs;
3478         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3479     } else if (rightOperand.isConst()) {
3480         rightRegs = resultRegs;
3481         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3482     }
3483
3484     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3485
3486     silentFillAllRegisters(resultRegs);
3487     m_jit.exceptionCheck();
3488
3489     gen.endJumpList().link(&m_jit);
3490     jsValueResult(resultRegs, node);
3491     return;
3492 }
3493
3494 void SpeculativeJIT::compileShiftOp(Node* node)
3495 {
3496     NodeType op = node->op();
3497     Edge& leftChild = node->child1();
3498     Edge& rightChild = node->child2();
3499
3500     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3501         switch (op) {
3502         case BitLShift:
3503             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3504             return;
3505         case BitRShift:
3506         case BitURShift:
3507             emitUntypedRightShiftBitOp(node);
3508             return;
3509         default:
3510             RELEASE_ASSERT_NOT_REACHED();
3511         }
3512     }
3513
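         // ECMAScript takes shift counts modulo 32, so the constant path masks the amount with
         // 0x1f up front.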
3514     if (rightChild->isInt32Constant()) {
3515         SpeculateInt32Operand op1(this, leftChild);
3516         GPRTemporary result(this, Reuse, op1);
3517
3518         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3519
3520         int32Result(result.gpr(), node);
3521     } else {
3522         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3523         SpeculateInt32Operand op1(this, leftChild);
3524         SpeculateInt32Operand op2(this, rightChild);
3525         GPRTemporary result(this, Reuse, op1);
3526
3527         GPRReg reg1 = op1.gpr();
3528         GPRReg reg2 = op2.gpr();
3529         shiftOp(op, reg1, reg2, result.gpr());
3530
3531         int32Result(result.gpr(), node);
3532     }
3533 }
3534
3535 void SpeculativeJIT::compileValueAdd(Node* node)
3536 {
3537     Edge& leftChild = node->child1();
3538     Edge& rightChild = node->child2();
3539
3540     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3541         JSValueOperand left(this, leftChild);
3542         JSValueOperand right(this, rightChild);
3543         JSValueRegs leftRegs = left.jsValueRegs();
3544         JSValueRegs rightRegs = right.jsValueRegs();
3545 #if USE(JSVALUE64)
3546         GPRTemporary result(this);
3547         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3548 #else
3549         GPRTemporary resultTag(this);
3550         GPRTemporary resultPayload(this);
3551         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3552 #endif
3553         flushRegisters();
3554         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3555         m_jit.exceptionCheck();
3556     
3557         jsValueResult(resultRegs, node);
3558         return;
3559     }
3560
3561 #if USE(JSVALUE64)
3562     bool needsScratchGPRReg = true;
3563     bool needsScratchFPRReg = false;
3564 #else
3565     bool needsScratchGPRReg = true;
3566     bool needsScratchFPRReg = true;
3567 #endif
3568
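         // Both operands might be numbers, so add an inline math IC seeded with the baseline
         // CodeBlock's ArithProfile for this bytecode. The IC's slow path can either repatch
         // itself (operationValueAddOptimize) or give up and call the plain operationValueAdd.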
3569     ArithProfile* arithProfile = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
3570     JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile);
3571     auto repatchingFunction = operationValueAddOptimize;
3572     auto nonRepatchingFunction = operationValueAdd;
3573     
3574     compileMathIC(node, addIC, needsScratchGPRReg, needsScratchFPRReg, repatchingFunction, nonRepatchingFunction);
3575 }
3576
3577 template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
3578 void SpeculativeJIT::compileMathIC(Node* node, JITBinaryMathIC<Generator>* mathIC, bool needsScratchGPRReg, bool needsScratchFPRReg, RepatchingFunction repatchingFunction, NonRepatchingFunction nonRepatchingFunction)
3579 {
3580     Edge& leftChild = node->child1();
3581     Edge& rightChild = node->child2();
3582
3583     std::optional<JSValueOperand> left;
3584     std::optional<JSValueOperand> right;
3585
3586     JSValueRegs leftRegs;
3587     JSValueRegs rightRegs;
3588
3589     FPRTemporary leftNumber(this);
3590     FPRTemporary rightNumber(this);
3591     FPRReg leftFPR = leftNumber.fpr();
3592     FPRReg rightFPR = rightNumber.fpr();
3593
3594     GPRReg scratchGPR = InvalidGPRReg;
3595     FPRReg scratchFPR = InvalidFPRReg;
3596
3597     std::optional<FPRTemporary> fprScratch;
3598     if (needsScratchFPRReg) {
3599         fprScratch.emplace(this);
3600         scratchFPR = fprScratch->fpr();
3601     }
3602
3603 #if USE(JSVALUE64)
3604     std::optional<GPRTemporary> gprScratch;
3605     if (needsScratchGPRReg) {
3606         gprScratch.emplace(this);
3607         scratchGPR = gprScratch->gpr();
3608     }
3609     GPRTemporary result(this);
3610     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3611 #else
3612     GPRTemporary resultTag(this);
3613     GPRTemporary resultPayload(this);
3614     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3615     if (needsScratchGPRReg)
3616         scratchGPR = resultRegs.tagGPR();
3617 #endif
3618
3619     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3620     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3621
3622     // The snippet generator does not support both operands being constant. If the left
3623     // operand is already const, we'll ignore the right operand's constness.
3624     if (leftChild->isInt32Constant())
3625         leftOperand.setConstInt32(leftChild->asInt32());
3626     else if (rightChild->isInt32Constant())
3627         rightOperand.setConstInt32(rightChild->asInt32());
3628
3629     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3630     ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));
3631
3632     if (!Generator::isLeftOperandValidConstant(leftOperand)) {
3633         left.emplace(this, leftChild);
3634         leftRegs = left->jsValueRegs();
3635     }
3636     if (!Generator::isRightOperandValidConstant(rightOperand)) {
3637         right.emplace(this, rightChild);
3638         rightRegs = right->jsValueRegs();
3639     }
3640
3641 #if ENABLE(MATH_IC_STATS)
3642     auto inlineStart = m_jit.label();
3643 #endif
3644
3645     Box<MathICGenerationState> addICGenerationState = Box<MathICGenerationState>::create();
3646     mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, leftFPR, rightFPR, scratchGPR, scratchFPR);
3647
3648     bool shouldEmitProfiling = false;
3649     bool generatedInline = mathIC->generateInline(m_jit, *addICGenerationState, shouldEmitProfiling);
3650
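         // If the IC produced an inline fast path, register a slow-path generator that spills
         // live registers, materializes any constant operand, calls the repatching or
         // non-repatching operation as the IC requests, and finalizes the IC's inline code at
         // link time. If inline generation failed, just flush and call the non-repatching
         // operation.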
3651     if (generatedInline) {
3652         ASSERT(!addICGenerationState->slowPathJumps.empty());
3653
3654         Vector<SilentRegisterSavePlan> savePlans;
3655         silentSpillAllRegistersImpl(false, savePlans, resultRegs);
3656
3657         auto done = m_jit.label();
3658
3659         addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
3660             addICGenerationState->slowPathJumps.link(&m_jit);
3661             addICGenerationState->slowPathStart = m_jit.label();
3662 #if ENABLE(MATH_IC_STATS)
3663             auto slowPathStart = m_jit.label();
3664 #endif
3665
3666             silentSpill(savePlans);
3667
3668             auto innerLeftRegs = leftRegs;
3669             auto innerRightRegs = rightRegs;
3670             if (Generator::isLeftOperandValidConstant(leftOperand)) {
3671                 innerLeftRegs = resultRegs;
3672                 m_jit.moveValue(leftChild->asJSValue(), innerLeftRegs);
3673             } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3674                 innerRightRegs = resultRegs;
3675                 m_jit.moveValue(rightChild->asJSValue(), innerRightRegs);
3676             }
3677
3678             if (addICGenerationState->shouldSlowPathRepatch)
3679                 addICGenerationState->slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJMic>(repatchingFunction), resultRegs, innerLeftRegs, innerRightRegs, TrustedImmPtr(mathIC));
3680             else
3681                 addICGenerationState->slowPathCall = callOperation(nonRepatchingFunction, resultRegs, innerLeftRegs, innerRightRegs);
3682
3683             silentFill(savePlans);
3684             m_jit.exceptionCheck();
3685             m_jit.jump().linkTo(done, &m_jit);
3686
3687             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3688                 mathIC->finalizeInlineCode(*addICGenerationState, linkBuffer);
3689             });
3690
3691 #if ENABLE(MATH_IC_STATS)
3692             auto slowPathEnd = m_jit.label();
3693             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3694                 size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
3695                 mathIC->m_generatedCodeSize += size;
3696             });
3697 #endif
3698
3699         });
3700     } else {
3701         if (Generator::isLeftOperandValidConstant(leftOperand)) {
3702             left.emplace(this, leftChild);
3703             leftRegs = left->jsValueRegs();
3704         } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3705             right.emplace(this, rightChild);
3706             rightRegs = right->jsValueRegs();
3707         }
3708
3709         flushRegisters();
3710         callOperation(nonRepatchingFunction, resultRegs, leftRegs, rightRegs);
3711         m_jit.exceptionCheck();
3712     }
3713
3714 #if ENABLE(MATH_IC_STATS)
3715     auto inlineEnd = m_jit.label();
3716     m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3717         size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
3718         mathIC->m_generatedCodeSize += size;
3719     });
3720 #endif
3721
3722     jsValueResult(resultRegs, node);
3723     return;
3724 }
3725
3726 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3727 {
3728     // We could do something smarter here, but this case is currently super rare and, unless
3729     // Symbol.hasInstance becomes popular, is likely to remain that way.
3730
3731     JSValueOperand value(this, node->child1());
3732     SpeculateCellOperand constructor(this, node->child2());
3733     JSValueOperand hasInstanceValue(this, node->child3());
3734     GPRTemporary result(this);
3735
3736     JSValueRegs valueRegs = value.jsValueRegs();
3737     GPRReg constructorGPR = constructor.gpr();
3738     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3739     GPRReg resultGPR = result.gpr();
3740
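         // The jump below is unconditional, so this node always ends up calling
         // operationInstanceOfCustom; the slow-path generator merely moves that call out of line.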
3741     MacroAssembler::Jump slowCase = m_jit.jump();
3742
3743     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3744
3745     unblessedBooleanResult(resultGPR, node);
3746 }
3747
3748 void SpeculativeJIT::compileIsCellWithType(Node* node)
3749 {
3750     switch (node->child1().useKind()) {
3751     case UntypedUse: {
3752         JSValueOperand value(this, node->child1());
3753 #if USE(JSVALUE64)
3754         GPRTemporary result(this, Reuse, value);
3755 #else
3756         GPRTemporary result(this, Reuse, value, PayloadWord);
3757 #endif
3758
3759         JSValueRegs valueRegs = value.jsValueRegs();
3760         GPRReg resultGPR = result.gpr();
3761
3762         JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3763
3764         m_jit.compare8(JITCompiler::Equal,
3765             JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3766             TrustedImm32(node->queriedType()),
3767             resultGPR);
3768         blessBoolean(resultGPR);
3769         JITCompiler::Jump done = m_jit.jump();
3770
3771         isNotCell.link(&m_jit);
3772         moveFalseTo(resultGPR);
3773
3774         done.link(&m_jit);
3775         blessedBooleanResult(resultGPR, node);
3776         return;
3777     }
3778
3779     case CellUse: {
3780         SpeculateCellOperand cell(this, node->child1());
3781         GPRTemporary result(this, Reuse, cell);
3782
3783         GPRReg cellGPR = cell.gpr();
3784         GPRReg resultGPR = result.gpr();
3785
3786         m_jit.compare8(JITCompiler::Equal,
3787             JITCompiler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
3788             TrustedImm32(node->queriedType()),
3789             resultGPR);
3790         blessBoolean(resultGPR);
3791         blessedBooleanResult(resultGPR, node);
3792         return;
3793     }
3794
3795     default:
3796         RELEASE_ASSERT_NOT_REACHED();
3797         break;
3798     }
3799 }
3800
3801 void SpeculativeJIT::compileIsTypedArrayView(Node* node)
3802 {
3803     JSValueOperand value(this, node->child1());
3804 #if USE(JSVALUE64)
3805     GPRTemporary result(this, Reuse, value);
3806 #else
3807     GPRTemporary result(this, Reuse, value, PayloadWord);
3808 #endif
3809
3810     JSValueRegs valueRegs = value.jsValueRegs();
3811     GPRReg resultGPR = result.gpr();
3812
3813     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3814
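         // This relies on the typed array JSTypes being contiguous from Int8ArrayType to
         // Float64ArrayType: after subtracting Int8ArrayType, a single unsigned comparison
         // classifies every typed array view, and everything else falls outside the range.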
3815     m_jit.load8(JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()), resultGPR);
3816     m_jit.sub32(TrustedImm32(Int8ArrayType), resultGPR);
3817     m_jit.compare32(JITCompiler::BelowOrEqual,
3818         resultGPR,
3819         TrustedImm32(Float64ArrayType - Int8ArrayType),
3820         resultGPR);
3821     blessBoolean(resultGPR);
3822     JITCompiler::Jump done = m_jit.jump();
3823
3824     isNotCell.link(&m_jit);
3825     moveFalseTo(resultGPR);
3826
3827     done.link(&m_jit);
3828     blessedBooleanResult(resultGPR, node);
3829 }
3830
3831 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3832 {
3833     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3834     JSValueOperand value(this, node->child1());
3835 #if USE(JSVALUE64)
3836     GPRTemporary result(this, Reuse, value);
3837 #else
3838     GPRTemporary result(this, Reuse, value, PayloadWord);
3839 #endif
3840
3841     JSValueRegs valueRegs = value.jsValueRegs();
3842     GPRReg resultGPR = result.gpr();
3843
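         // Values that are already objects pass through unchanged; non-cells and non-object cells
         // call operationObjectConstructor, which performs the full Object() conversion.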
3844     MacroAssembler::JumpList slowCases;
3845     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3846     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3847     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3848
3849     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3850     cellResult(resultGPR, node);
3851 }
3852
3853 void SpeculativeJIT::compileArithAdd(Node* node)
3854 {
3855     switch (node->binaryUseKind()) {
3856     case Int32Use: {
3857         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3858
3859         if (node->child2()->isInt32Constant()) {
3860             SpeculateInt32Operand op1(this, node->child1());
3861             GPRTemporary result(this, Reuse, op1);
3862
3863             GPRReg gpr1 = op1.gpr();
3864             int32_t imm2 = node->child2()->asInt32();
3865             GPRReg gprResult = result.gpr();
3866
3867             if (!shouldCheckOverflow(node->arithMode())) {
3868                 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3869                 int32Result(gprResult, node);
3870                 return;
3871             }
3872
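                 // branchAdd32 may clobber gpr1 when it doubles as the result register. Record a
                 // SpeculationRecovery so the OSR exit path can subtract the immediate again and
                 // recover the original operand value before exiting.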
3873             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
3874             if (gpr1 == gprResult) {
3875                 speculationCheck(Overflow, JSValueRegs(), 0, check,
3876                     SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
3877             } else
3878                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3879
3880             int32Result(gprResult, node);
3881             return;
3882         }
3883                 
3884         SpeculateInt32Operand op1(this, node->child1());
3885         SpeculateInt32Operand op2(this, node->child2());
3886         GPRTemporary result(this, Reuse, op1, op2);
3887
3888         GPRReg gpr1 = op1.gpr();
3889         GPRReg gpr2 = op2.gpr();
3890         GPRReg gprResult = result.gpr();
3891
3892         if (!shouldCheckOverflow(node->arithMode()))
38