Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGCapabilities.h"
37 #include "DFGMayExit.h"
38 #include "DFGOSRExitFuzz.h"
39 #include "DFGSaneStringGetByValSlowPathGenerator.h"
40 #include "DFGSlowPathGenerator.h"
41 #include "DFGSnippetParams.h"
42 #include "DirectArguments.h"
43 #include "JITAddGenerator.h"
44 #include "JITBitAndGenerator.h"
45 #include "JITBitOrGenerator.h"
46 #include "JITBitXorGenerator.h"
47 #include "JITDivGenerator.h"
48 #include "JITLeftShiftGenerator.h"
49 #include "JITMulGenerator.h"
50 #include "JITRightShiftGenerator.h"
51 #include "JITSubGenerator.h"
52 #include "JSAsyncFunction.h"
53 #include "JSCInlines.h"
54 #include "JSEnvironmentRecord.h"
55 #include "JSFixedArray.h"
56 #include "JSGeneratorFunction.h"
57 #include "JSLexicalEnvironment.h"
58 #include "LinkBuffer.h"
59 #include "RegExpConstructor.h"
60 #include "ScopedArguments.h"
61 #include "ScratchRegisterAllocator.h"
62 #include <wtf/BitVector.h>
63 #include <wtf/Box.h>
64 #include <wtf/MathExtras.h>
65
66 namespace JSC { namespace DFG {
67
68 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
69     : m_compileOkay(true)
70     , m_jit(jit)
71     , m_currentNode(0)
72     , m_lastGeneratedNode(LastNodeType)
73     , m_indexInBlock(0)
74     , m_generationInfo(m_jit.graph().frameRegisterCount())
75     , m_state(m_jit.graph())
76     , m_interpreter(m_jit.graph(), m_state)
77     , m_stream(&jit.jitCode()->variableEventStream)
78     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
79 {
80 }
81
82 SpeculativeJIT::~SpeculativeJIT()
83 {
84 }
85
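// Fast-path allocation of a JSFinalObject together with its butterfly: the out-of-line
// property storage plus, when the structure has indexed properties, an indexing header
// and element vector. Both pieces come from MarkedAllocator fast paths; if either
// allocator is unavailable or its fast path fails, control lands in the
// CallArrayAllocatorSlowPathGenerator registered below, which calls operationNewRawObject.
// Unused vector slots are pre-filled with PNaN (double shapes) or the empty JSValue.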
86 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
87 {
88     IndexingType indexingType = structure->indexingType();
89     bool hasIndexingHeader = hasIndexedProperties(indexingType);
90
91     unsigned inlineCapacity = structure->inlineCapacity();
92     unsigned outOfLineCapacity = structure->outOfLineCapacity();
93     
94     GPRTemporary scratch(this);
95     GPRTemporary scratch2(this);
96     GPRReg scratchGPR = scratch.gpr();
97     GPRReg scratch2GPR = scratch2.gpr();
98
99     ASSERT(vectorLength >= numElements);
100     vectorLength = Butterfly::optimalContiguousVectorLength(structure.get(), vectorLength);
101     
102     JITCompiler::JumpList slowCases;
103
104     size_t size = 0;
105     if (hasIndexingHeader)
106         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
107     size += outOfLineCapacity * sizeof(JSValue);
108
109     m_jit.move(TrustedImmPtr(0), storageGPR);
110     
111     if (size) {
112         if (MarkedAllocator* allocator = m_jit.vm()->auxiliarySpace.allocatorFor(size)) {
113             m_jit.move(TrustedImmPtr(allocator), scratchGPR);
114             m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);
115             
116             m_jit.addPtr(
117                 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
118                 storageGPR);
119             
120             if (hasIndexingHeader)
121                 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
122         } else
123             slowCases.append(m_jit.jump());
124     }
125
126     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
127     MarkedAllocator* allocatorPtr = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorFor(allocationSize);
128     if (allocatorPtr) {
129         m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
130         emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
131         m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
132     } else
133         slowCases.append(m_jit.jump());
134
135     // I want a slow path that also loads out the storage pointer, and that's
136     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
137     // of work for a very small piece of functionality. :-/
138     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
139         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
140         structure, vectorLength));
141
142     if (numElements < vectorLength && LIKELY(!hasUndecided(structure->indexingType()))) {
143 #if USE(JSVALUE64)
144         if (hasDouble(structure->indexingType()))
145             m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
146         else
147             m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
148         for (unsigned i = numElements; i < vectorLength; ++i)
149             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
150 #else
151         EncodedValueDescriptor value;
152         if (hasDouble(structure->indexingType()))
153             value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
154         else
155             value.asInt64 = JSValue::encode(JSValue());
156         for (unsigned i = numElements; i < vectorLength; ++i) {
157             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
158             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
159         }
160 #endif
161     }
162     
163     if (hasIndexingHeader)
164         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
165     
166     m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());
167     
168     m_jit.mutatorFence(*m_jit.vm());
169 }
170
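// Helpers for materializing call-frame facts. emitGetLength loads the argument count,
// either as a compile-time constant from a non-varargs InlineCallFrame or from the
// argument-count slot in the frame; emitGetCallee loads the callee from a constant,
// the closure-call recovery slot, or the machine frame; emitGetArgumentStart computes
// the address of the first argument relative to the call frame register.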
171 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
172 {
173     if (inlineCallFrame && !inlineCallFrame->isVarargs())
174         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
175     else {
176         VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
177         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
178         if (!includeThis)
179             m_jit.sub32(TrustedImm32(1), lengthGPR);
180     }
181 }
182
183 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
184 {
185     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
186 }
187
188 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
189 {
190     if (origin.inlineCallFrame) {
191         if (origin.inlineCallFrame->isClosureCall) {
192             m_jit.loadPtr(
193                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
194                 calleeGPR);
195         } else {
196             m_jit.move(
197                 TrustedImmPtr::weakPointer(m_jit.graph(), origin.inlineCallFrame->calleeRecovery.constant().asCell()),
198                 calleeGPR);
199         }
200     } else
201         m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
202 }
203
204 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
205 {
206     m_jit.addPtr(
207         TrustedImm32(
208             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
209         GPRInfo::callFrameRegister, startGPR);
210 }
211
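// OSR exit fuzzing support. When Options::useOSRExitFuzz() is enabled, each
// speculation check bumps a global counter and, once the configured fireOSRExitFuzzAt
// / fireOSRExitFuzzAtOrAfter threshold is reached, the returned jump forces the exit
// to be taken, so rarely-exercised OSR exit paths can be stress-tested.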
212 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
213 {
214     if (!Options::useOSRExitFuzz()
215         || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
216         || !doOSRExitFuzzing())
217         return MacroAssembler::Jump();
218     
219     MacroAssembler::Jump result;
220     
221     m_jit.pushToSave(GPRInfo::regT0);
222     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
223     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
224     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
225     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
226     unsigned at = Options::fireOSRExitFuzzAt();
227     if (at || atOrAfter) {
228         unsigned threshold;
229         MacroAssembler::RelationalCondition condition;
230         if (atOrAfter) {
231             threshold = atOrAfter;
232             condition = MacroAssembler::Below;
233         } else {
234             threshold = at;
235             condition = MacroAssembler::NotEqual;
236         }
237         MacroAssembler::Jump ok = m_jit.branch32(
238             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
239         m_jit.popToRestore(GPRInfo::regT0);
240         result = m_jit.jump();
241         ok.link(&m_jit);
242     }
243     m_jit.popToRestore(GPRInfo::regT0);
244     
245     return result;
246 }
247
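// speculationCheck records an OSR exit: the failing jump(s) are registered as exit
// info on the JITCompiler, and a matching OSRExit entry (capturing the current
// variable-event stream index and, in one overload, a SpeculationRecovery) is appended
// to the JITCode so the exit compiler can reconstruct bytecode state at that point.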
248 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
249 {
250     if (!m_compileOkay)
251         return;
252     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
253     if (fuzzJump.isSet()) {
254         JITCompiler::JumpList jumpsToFail;
255         jumpsToFail.append(fuzzJump);
256         jumpsToFail.append(jumpToFail);
257         m_jit.appendExitInfo(jumpsToFail);
258     } else
259         m_jit.appendExitInfo(jumpToFail);
260     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
261 }
262
263 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
264 {
265     if (!m_compileOkay)
266         return;
267     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
268     if (fuzzJump.isSet()) {
269         JITCompiler::JumpList myJumpsToFail;
270         myJumpsToFail.append(jumpsToFail);
271         myJumpsToFail.append(fuzzJump);
272         m_jit.appendExitInfo(myJumpsToFail);
273     } else
274         m_jit.appendExitInfo(jumpsToFail);
275     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
276 }
277
278 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
279 {
280     if (!m_compileOkay)
281         return OSRExitJumpPlaceholder();
282     unsigned index = m_jit.jitCode()->osrExit.size();
283     m_jit.appendExitInfo();
284     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
285     return OSRExitJumpPlaceholder(index);
286 }
287
288 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
289 {
290     return speculationCheck(kind, jsValueSource, nodeUse.node());
291 }
292
293 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
294 {
295     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
296 }
297
298 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
299 {
300     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
301 }
302
303 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
304 {
305     if (!m_compileOkay)
306         return;
307     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
308     m_jit.appendExitInfo(jumpToFail);
309     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
310 }
311
312 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
313 {
314     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
315 }
316
317 void SpeculativeJIT::emitInvalidationPoint(Node* node)
318 {
319     if (!m_compileOkay)
320         return;
321     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
322     m_jit.jitCode()->appendOSRExit(OSRExit(
323         UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
324         this, m_stream->size()));
325     info.m_replacementSource = m_jit.watchpointLabel();
326     ASSERT(info.m_replacementSource.isSet());
327     noResult(node);
328 }
329
330 void SpeculativeJIT::unreachable(Node* node)
331 {
332     m_compileOkay = false;
333     m_jit.abortWithReason(DFGUnreachableNode, node->op());
334 }
335
336 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
337 {
338     if (!m_compileOkay)
339         return;
340     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
341     m_compileOkay = false;
342     if (verboseCompilationEnabled())
343         dataLog("Bailing compilation.\n");
344 }
345
346 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
347 {
348     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
349 }
350
351 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
352 {
353     ASSERT(needsTypeCheck(edge, typesPassedThrough));
354     m_interpreter.filter(edge, typesPassedThrough);
355     speculationCheck(exitKind, source, edge.node(), jumpToFail);
356 }
357
358 RegisterSet SpeculativeJIT::usedRegisters()
359 {
360     RegisterSet result;
361     
362     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
363         GPRReg gpr = GPRInfo::toRegister(i);
364         if (m_gprs.isInUse(gpr))
365             result.set(gpr);
366     }
367     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
368         FPRReg fpr = FPRInfo::toRegister(i);
369         if (m_fprs.isInUse(fpr))
370             result.set(fpr);
371     }
372     
373     result.merge(RegisterSet::stubUnavailableRegisters());
374     
375     return result;
376 }
377
378 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
379 {
380     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
381 }
382
383 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
384 {
385     m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
386 }
387
388 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
389 {
390     for (auto& slowPathGenerator : m_slowPathGenerators) {
391         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
392         slowPathGenerator->generate(this);
393     }
394     for (auto& slowPathLambda : m_slowPathLambdas) {
395         Node* currentNode = slowPathLambda.currentNode;
396         m_currentNode = currentNode;
397         m_outOfLineStreamIndex = slowPathLambda.streamIndex;
398         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
399         slowPathLambda.generator();
400         m_outOfLineStreamIndex = std::nullopt;
401     }
402 }
403
404 void SpeculativeJIT::clearGenerationInfo()
405 {
406     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
407         m_generationInfo[i] = GenerationInfo();
408     m_gprs = RegisterBank<GPRInfo>();
409     m_fprs = RegisterBank<FPRInfo>();
410 }
411
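// A "silent" spill/fill plan describes how to save a live register around a call and
// restore it afterwards without updating the recorded generation info. The fill side
// picks the cheapest way back: re-materialize a constant, reload the spill slot in its
// stored format, or shift between Int52 and StrictInt52 representations.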
412 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
413 {
414     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
415     Node* node = info.node();
416     DataFormat registerFormat = info.registerFormat();
417     ASSERT(registerFormat != DataFormatNone);
418     ASSERT(registerFormat != DataFormatDouble);
419         
420     SilentSpillAction spillAction;
421     SilentFillAction fillAction;
422         
423     if (!info.needsSpill())
424         spillAction = DoNothingForSpill;
425     else {
426 #if USE(JSVALUE64)
427         ASSERT(info.gpr() == source);
428         if (registerFormat == DataFormatInt32)
429             spillAction = Store32Payload;
430         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
431             spillAction = StorePtr;
432         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
433             spillAction = Store64;
434         else {
435             ASSERT(registerFormat & DataFormatJS);
436             spillAction = Store64;
437         }
438 #elif USE(JSVALUE32_64)
439         if (registerFormat & DataFormatJS) {
440             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
441             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
442         } else {
443             ASSERT(info.gpr() == source);
444             spillAction = Store32Payload;
445         }
446 #endif
447     }
448         
449     if (registerFormat == DataFormatInt32) {
450         ASSERT(info.gpr() == source);
451         ASSERT(isJSInt32(info.registerFormat()));
452         if (node->hasConstant()) {
453             ASSERT(node->isInt32Constant());
454             fillAction = SetInt32Constant;
455         } else
456             fillAction = Load32Payload;
457     } else if (registerFormat == DataFormatBoolean) {
458 #if USE(JSVALUE64)
459         RELEASE_ASSERT_NOT_REACHED();
460 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
461         fillAction = DoNothingForFill;
462 #endif
463 #elif USE(JSVALUE32_64)
464         ASSERT(info.gpr() == source);
465         if (node->hasConstant()) {
466             ASSERT(node->isBooleanConstant());
467             fillAction = SetBooleanConstant;
468         } else
469             fillAction = Load32Payload;
470 #endif
471     } else if (registerFormat == DataFormatCell) {
472         ASSERT(info.gpr() == source);
473         if (node->hasConstant()) {
474             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
475             node->asCell(); // To get the assertion.
476             fillAction = SetCellConstant;
477         } else {
478 #if USE(JSVALUE64)
479             fillAction = LoadPtr;
480 #else
481             fillAction = Load32Payload;
482 #endif
483         }
484     } else if (registerFormat == DataFormatStorage) {
485         ASSERT(info.gpr() == source);
486         fillAction = LoadPtr;
487     } else if (registerFormat == DataFormatInt52) {
488         if (node->hasConstant())
489             fillAction = SetInt52Constant;
490         else if (info.spillFormat() == DataFormatInt52)
491             fillAction = Load64;
492         else if (info.spillFormat() == DataFormatStrictInt52)
493             fillAction = Load64ShiftInt52Left;
494         else if (info.spillFormat() == DataFormatNone)
495             fillAction = Load64;
496         else {
497             RELEASE_ASSERT_NOT_REACHED();
498 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
499             fillAction = Load64; // Make GCC happy.
500 #endif
501         }
502     } else if (registerFormat == DataFormatStrictInt52) {
503         if (node->hasConstant())
504             fillAction = SetStrictInt52Constant;
505         else if (info.spillFormat() == DataFormatInt52)
506             fillAction = Load64ShiftInt52Right;
507         else if (info.spillFormat() == DataFormatStrictInt52)
508             fillAction = Load64;
509         else if (info.spillFormat() == DataFormatNone)
510             fillAction = Load64;
511         else {
512             RELEASE_ASSERT_NOT_REACHED();
513 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
514             fillAction = Load64; // Make GCC happy.
515 #endif
516         }
517     } else {
518         ASSERT(registerFormat & DataFormatJS);
519 #if USE(JSVALUE64)
520         ASSERT(info.gpr() == source);
521         if (node->hasConstant()) {
522             if (node->isCellConstant())
523                 fillAction = SetTrustedJSConstant;
524             else
525                 fillAction = SetJSConstant;
526         } else if (info.spillFormat() == DataFormatInt32) {
527             ASSERT(registerFormat == DataFormatJSInt32);
528             fillAction = Load32PayloadBoxInt;
529         } else
530             fillAction = Load64;
531 #else
532         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
533         if (node->hasConstant())
534             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
535         else if (info.payloadGPR() == source)
536             fillAction = Load32Payload;
537         else { // Fill the Tag
538             switch (info.spillFormat()) {
539             case DataFormatInt32:
540                 ASSERT(registerFormat == DataFormatJSInt32);
541                 fillAction = SetInt32Tag;
542                 break;
543             case DataFormatCell:
544                 ASSERT(registerFormat == DataFormatJSCell);
545                 fillAction = SetCellTag;
546                 break;
547             case DataFormatBoolean:
548                 ASSERT(registerFormat == DataFormatJSBoolean);
549                 fillAction = SetBooleanTag;
550                 break;
551             default:
552                 fillAction = Load32Tag;
553                 break;
554             }
555         }
556 #endif
557     }
558         
559     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
560 }
561     
562 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
563 {
564     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
565     Node* node = info.node();
566     ASSERT(info.registerFormat() == DataFormatDouble);
567
568     SilentSpillAction spillAction;
569     SilentFillAction fillAction;
570         
571     if (!info.needsSpill())
572         spillAction = DoNothingForSpill;
573     else {
574         ASSERT(!node->hasConstant());
575         ASSERT(info.spillFormat() == DataFormatNone);
576         ASSERT(info.fpr() == source);
577         spillAction = StoreDouble;
578     }
579         
580 #if USE(JSVALUE64)
581     if (node->hasConstant()) {
582         node->asNumber(); // To get the assertion.
583         fillAction = SetDoubleConstant;
584     } else {
585         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
586         fillAction = LoadDouble;
587     }
588 #elif USE(JSVALUE32_64)
589     ASSERT(info.registerFormat() == DataFormatDouble);
590     if (node->hasConstant()) {
591         node->asNumber(); // To get the assertion.
592         fillAction = SetDoubleConstant;
593     } else
594         fillAction = LoadDouble;
595 #endif
596
597     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
598 }
599     
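// silentSpill / silentFill execute the plans computed above: the store before the
// call, and the corresponding reload or constant re-materialization after it.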
600 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
601 {
602     switch (plan.spillAction()) {
603     case DoNothingForSpill:
604         break;
605     case Store32Tag:
606         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
607         break;
608     case Store32Payload:
609         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
610         break;
611     case StorePtr:
612         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
613         break;
614 #if USE(JSVALUE64)
615     case Store64:
616         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
617         break;
618 #endif
619     case StoreDouble:
620         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
621         break;
622     default:
623         RELEASE_ASSERT_NOT_REACHED();
624     }
625 }
626     
627 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan)
628 {
629     switch (plan.fillAction()) {
630     case DoNothingForFill:
631         break;
632     case SetInt32Constant:
633         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
634         break;
635 #if USE(JSVALUE64)
636     case SetInt52Constant:
637         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
638         break;
639     case SetStrictInt52Constant:
640         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
641         break;
642 #endif // USE(JSVALUE64)
643     case SetBooleanConstant:
644         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
645         break;
646     case SetCellConstant:
647         ASSERT(plan.node()->constant()->value().isCell());
648         m_jit.move(TrustedImmPtr(plan.node()->constant()), plan.gpr());
649         break;
650 #if USE(JSVALUE64)
651     case SetTrustedJSConstant:
652         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
653         break;
654     case SetJSConstant:
655         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
656         break;
657     case SetDoubleConstant:
658         m_jit.moveDouble(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), plan.fpr());
659         break;
660     case Load32PayloadBoxInt:
661         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
662         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
663         break;
664     case Load32PayloadConvertToInt52:
665         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
666         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
667         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
668         break;
669     case Load32PayloadSignExtend:
670         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
671         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
672         break;
673 #else
674     case SetJSConstantTag:
675         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
676         break;
677     case SetJSConstantPayload:
678         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
679         break;
680     case SetInt32Tag:
681         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
682         break;
683     case SetCellTag:
684         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
685         break;
686     case SetBooleanTag:
687         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
688         break;
689     case SetDoubleConstant:
690         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
691         break;
692 #endif
693     case Load32Tag:
694         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
695         break;
696     case Load32Payload:
697         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
698         break;
699     case LoadPtr:
700         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
701         break;
702 #if USE(JSVALUE64)
703     case Load64:
704         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
705         break;
706     case Load64ShiftInt52Right:
707         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
708         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
709         break;
710     case Load64ShiftInt52Left:
711         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
712         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
713         break;
714 #endif
715     case LoadDouble:
716         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
717         break;
718     default:
719         RELEASE_ASSERT_NOT_REACHED();
720     }
721 }
722     
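// Emits the branch(es) taken when the indexing-type byte in tempGPR does not match
// what the ArrayMode expects. Array::Array additionally requires the IsArray bit, and
// the ArrayStorage / SlowPutArrayStorage modes accept a range of shapes when slow-put
// stores are allowed.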
723 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
724 {
725     switch (arrayMode.arrayClass()) {
726     case Array::OriginalArray: {
727         CRASH();
728 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
729         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
730         return result;
731 #endif
732     }
733         
734     case Array::Array:
735         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
736         return m_jit.branch32(
737             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
738         
739     case Array::NonArray:
740     case Array::OriginalNonArray:
741         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
742         return m_jit.branch32(
743             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
744         
745     case Array::PossiblyArray:
746         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
747         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
748     }
749     
750     RELEASE_ASSERT_NOT_REACHED();
751     return JITCompiler::Jump();
752 }
753
754 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
755 {
756     JITCompiler::JumpList result;
757     
758     switch (arrayMode.type()) {
759     case Array::Int32:
760     case Array::Double:
761     case Array::Contiguous:
762     case Array::Undecided:
763         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, arrayMode.shapeMask());
764
765     case Array::ArrayStorage:
766     case Array::SlowPutArrayStorage: {
767         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
768         
769         if (arrayMode.isJSArray()) {
770             if (arrayMode.isSlowPut()) {
771                 result.append(
772                     m_jit.branchTest32(
773                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
774                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
775                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
776                 result.append(
777                     m_jit.branch32(
778                         MacroAssembler::Above, tempGPR,
779                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
780                 break;
781             }
782             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
783             result.append(
784                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
785             break;
786         }
787         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
788         if (arrayMode.isSlowPut()) {
789             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
790             result.append(
791                 m_jit.branch32(
792                     MacroAssembler::Above, tempGPR,
793                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
794             break;
795         }
796         result.append(
797             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
798         break;
799     }
800     default:
801         CRASH();
802         break;
803     }
804     
805     return result;
806 }
807
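// CheckArray: verify at run time that the base cell has the indexing type (or, for
// typed arrays and arguments objects, the JSType) the array access was speculated on,
// OSR-exiting on mismatch. No storage conversion happens here; that is Arrayify's job.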
808 void SpeculativeJIT::checkArray(Node* node)
809 {
810     ASSERT(node->arrayMode().isSpecific());
811     ASSERT(!node->arrayMode().doesConversion());
812     
813     SpeculateCellOperand base(this, node->child1());
814     GPRReg baseReg = base.gpr();
815     
816     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
817         noResult(m_currentNode);
818         return;
819     }
820     
821     const ClassInfo* expectedClassInfo = 0;
822     
823     switch (node->arrayMode().type()) {
824     case Array::AnyTypedArray:
825     case Array::String:
826         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
827         break;
828     case Array::Int32:
829     case Array::Double:
830     case Array::Contiguous:
831     case Array::Undecided:
832     case Array::ArrayStorage:
833     case Array::SlowPutArrayStorage: {
834         GPRTemporary temp(this);
835         GPRReg tempGPR = temp.gpr();
836         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
837         speculationCheck(
838             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
839             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
840         
841         noResult(m_currentNode);
842         return;
843     }
844     case Array::DirectArguments:
845         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
846         noResult(m_currentNode);
847         return;
848     case Array::ScopedArguments:
849         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
850         noResult(m_currentNode);
851         return;
852     default:
853         speculateCellTypeWithoutTypeFiltering(
854             node->child1(), baseReg,
855             typeForTypedArrayType(node->arrayMode().typedArrayType()));
856         noResult(m_currentNode);
857         return;
858     }
859     
860     RELEASE_ASSERT(expectedClassInfo);
861     
862     GPRTemporary temp(this);
863     GPRTemporary temp2(this);
864     m_jit.emitLoadStructure(*m_jit.vm(), baseReg, temp.gpr(), temp2.gpr());
865     speculationCheck(
866         BadType, JSValueSource::unboxedCell(baseReg), node,
867         m_jit.branchPtr(
868             MacroAssembler::NotEqual,
869             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
870             TrustedImmPtr(expectedClassInfo)));
871     
872     noResult(m_currentNode);
873 }
874
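// Arrayify: convert the base object's storage to the requested indexing shape if it
// does not already have it. The fast path is only the structure / indexing-type check;
// the conversion itself is done by ArrayifySlowPathGenerator.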
875 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
876 {
877     ASSERT(node->arrayMode().doesConversion());
878     
879     GPRTemporary temp(this);
880     GPRTemporary structure;
881     GPRReg tempGPR = temp.gpr();
882     GPRReg structureGPR = InvalidGPRReg;
883     
884     if (node->op() != ArrayifyToStructure) {
885         GPRTemporary realStructure(this);
886         structure.adopt(realStructure);
887         structureGPR = structure.gpr();
888     }
889         
890     // We can skip all that comes next if we already have array storage.
891     MacroAssembler::JumpList slowPath;
892     
893     if (node->op() == ArrayifyToStructure) {
894         slowPath.append(m_jit.branchWeakStructure(
895             JITCompiler::NotEqual,
896             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
897             node->structure()));
898     } else {
899         m_jit.load8(
900             MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
901         
902         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
903     }
904     
905     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
906         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
907     
908     noResult(m_currentNode);
909 }
910
911 void SpeculativeJIT::arrayify(Node* node)
912 {
913     ASSERT(node->arrayMode().isSpecific());
914     
915     SpeculateCellOperand base(this, node->child1());
916     
917     if (!node->child2()) {
918         arrayify(node, base.gpr(), InvalidGPRReg);
919         return;
920     }
921     
922     SpeculateInt32Operand property(this, node->child2());
923     
924     arrayify(node, base.gpr(), property.gpr());
925 }
926
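// fillStorage produces a GPR holding a storage (butterfly) pointer for the edge,
// reloading it from its spill slot when it was spilled as DataFormatStorage and
// otherwise falling back to filling the edge as a cell.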
927 GPRReg SpeculativeJIT::fillStorage(Edge edge)
928 {
929     VirtualRegister virtualRegister = edge->virtualRegister();
930     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
931     
932     switch (info.registerFormat()) {
933     case DataFormatNone: {
934         if (info.spillFormat() == DataFormatStorage) {
935             GPRReg gpr = allocate();
936             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
937             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
938             info.fillStorage(*m_stream, gpr);
939             return gpr;
940         }
941         
942         // Must be a cell; fill it as a cell and then return the pointer.
943         return fillSpeculateCell(edge);
944     }
945         
946     case DataFormatStorage: {
947         GPRReg gpr = info.gpr();
948         m_gprs.lock(gpr);
949         return gpr;
950     }
951         
952     default:
953         return fillSpeculateCell(edge);
954     }
955 }
956
957 void SpeculativeJIT::useChildren(Node* node)
958 {
959     if (node->flags() & NodeHasVarArgs) {
960         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
961             if (!!m_jit.graph().m_varArgChildren[childIdx])
962                 use(m_jit.graph().m_varArgChildren[childIdx]);
963         }
964     } else {
965         Edge child1 = node->child1();
966         if (!child1) {
967             ASSERT(!node->child2() && !node->child3());
968             return;
969         }
970         use(child1);
971         
972         Edge child2 = node->child2();
973         if (!child2) {
974             ASSERT(!node->child3());
975             return;
976         }
977         use(child2);
978         
979         Edge child3 = node->child3();
980         if (!child3)
981             return;
982         use(child3);
983     }
984 }
985
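// TryGetById goes through the get-by-id inline cache with AccessType::TryGet. In the
// UntypedUse case the non-cell path branches straight to the cache's slow path via
// the notCell jump handed to cachedGetById.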
986 void SpeculativeJIT::compileTryGetById(Node* node)
987 {
988     switch (node->child1().useKind()) {
989     case CellUse: {
990         SpeculateCellOperand base(this, node->child1());
991         JSValueRegsTemporary result(this, Reuse, base);
992
993         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
994         JSValueRegs resultRegs = result.regs();
995
996         base.use();
997
998         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);
999
1000         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1001         break;
1002     }
1003
1004     case UntypedUse: {
1005         JSValueOperand base(this, node->child1());
1006         JSValueRegsTemporary result(this, Reuse, base);
1007
1008         JSValueRegs baseRegs = base.jsValueRegs();
1009         JSValueRegs resultRegs = result.regs();
1010
1011         base.use();
1012
1013         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1014
1015         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);
1016
1017         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1018         break;
1019     }
1020
1021     default:
1022         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1023         break;
1024     } 
1025 }
1026
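// compileIn: when the property name is a constant atomic string, emit a patchable
// jump plus a structure stub (AccessType::In) so the "in" check can be inline-cached
// and repatched; otherwise fall back to calling operationGenericIn with the base and
// key.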
1027 void SpeculativeJIT::compileIn(Node* node)
1028 {
1029     SpeculateCellOperand base(this, node->child1());
1030     GPRReg baseGPR = base.gpr();
1031     
1032     if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(*m_jit.vm())) {
1033         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1034             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1035             
1036             GPRTemporary result(this);
1037             GPRReg resultGPR = result.gpr();
1038
1039             use(node->child2());
1040             
1041             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1042             MacroAssembler::Label done = m_jit.label();
1043             
1044             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1045             // we can cast it to const AtomicStringImpl* safely.
1046             auto slowPath = slowPathCall(
1047                 jump.m_jump, this, operationInOptimize,
1048                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1049                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1050             
1051             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1052             stubInfo->codeOrigin = node->origin.semantic;
1053             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1054             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1055             stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
1056 #if USE(JSVALUE32_64)
1057             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1058             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1059             stubInfo->patch.thisTagGPR = static_cast<int8_t>(InvalidGPRReg);
1060 #endif
1061             stubInfo->patch.usedRegisters = usedRegisters();
1062
1063             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1064             addSlowPathGenerator(WTFMove(slowPath));
1065
1066             base.use();
1067
1068             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1069             return;
1070         }
1071     }
1072
1073     JSValueOperand key(this, node->child2());
1074     JSValueRegs regs = key.jsValueRegs();
1075         
1076     GPRFlushedCallResult result(this);
1077     GPRReg resultGPR = result.gpr();
1078         
1079     base.use();
1080     key.use();
1081         
1082     flushRegisters();
1083     callOperation(
1084         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1085         baseGPR, regs);
1086     m_jit.exceptionCheck();
1087     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1088 }
1089
1090 void SpeculativeJIT::compileDeleteById(Node* node)
1091 {
1092     JSValueOperand value(this, node->child1());
1093     GPRFlushedCallResult result(this);
1094
1095     JSValueRegs valueRegs = value.jsValueRegs();
1096     GPRReg resultGPR = result.gpr();
1097
1098     value.use();
1099
1100     flushRegisters();
1101     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1102     m_jit.exceptionCheck();
1103
1104     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1105 }
1106
1107 void SpeculativeJIT::compileDeleteByVal(Node* node)
1108 {
1109     JSValueOperand base(this, node->child1());
1110     JSValueOperand key(this, node->child2());
1111     GPRFlushedCallResult result(this);
1112
1113     JSValueRegs baseRegs = base.jsValueRegs();
1114     JSValueRegs keyRegs = key.jsValueRegs();
1115     GPRReg resultGPR = result.gpr();
1116
1117     base.use();
1118     key.use();
1119
1120     flushRegisters();
1121     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1122     m_jit.exceptionCheck();
1123
1124     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1125 }
1126
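// The nonSpeculative compare and strict-equality helpers first look for a peephole
// opportunity: if the compare's only use is the Branch that immediately follows it,
// the two are fused and compilation resumes at the branch node. The return value
// tells the caller whether that fusion happened.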
1127 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1128 {
1129     unsigned branchIndexInBlock = detectPeepHoleBranch();
1130     if (branchIndexInBlock != UINT_MAX) {
1131         Node* branchNode = m_block->at(branchIndexInBlock);
1132
1133         ASSERT(node->adjustedRefCount() == 1);
1134         
1135         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1136     
1137         m_indexInBlock = branchIndexInBlock;
1138         m_currentNode = branchNode;
1139         
1140         return true;
1141     }
1142     
1143     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1144     
1145     return false;
1146 }
1147
1148 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1149 {
1150     unsigned branchIndexInBlock = detectPeepHoleBranch();
1151     if (branchIndexInBlock != UINT_MAX) {
1152         Node* branchNode = m_block->at(branchIndexInBlock);
1153
1154         ASSERT(node->adjustedRefCount() == 1);
1155         
1156         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1157     
1158         m_indexInBlock = branchIndexInBlock;
1159         m_currentNode = branchNode;
1160         
1161         return true;
1162     }
1163     
1164     nonSpeculativeNonPeepholeStrictEq(node, invert);
1165     
1166     return false;
1167 }
1168
1169 static const char* dataFormatString(DataFormat format)
1170 {
1171     // These values correspond to the DataFormat enum.
1172     const char* strings[] = {
1173         "[  ]",
1174         "[ i]",
1175         "[ d]",
1176         "[ c]",
1177         "Err!",
1178         "Err!",
1179         "Err!",
1180         "Err!",
1181         "[J ]",
1182         "[Ji]",
1183         "[Jd]",
1184         "[Jc]",
1185         "Err!",
1186         "Err!",
1187         "Err!",
1188         "Err!",
1189     };
1190     return strings[format];
1191 }
1192
1193 void SpeculativeJIT::dump(const char* label)
1194 {
1195     if (label)
1196         dataLogF("<%s>\n", label);
1197
1198     dataLogF("  gprs:\n");
1199     m_gprs.dump();
1200     dataLogF("  fprs:\n");
1201     m_fprs.dump();
1202     dataLogF("  VirtualRegisters:\n");
1203     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1204         GenerationInfo& info = m_generationInfo[i];
1205         if (info.alive())
1206             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1207         else
1208             dataLogF("    % 3d:[__][__]", i);
1209         if (info.registerFormat() == DataFormatDouble)
1210             dataLogF(":fpr%d\n", info.fpr());
1211         else if (info.registerFormat() != DataFormatNone
1212 #if USE(JSVALUE32_64)
1213             && !(info.registerFormat() & DataFormatJS)
1214 #endif
1215             ) {
1216             ASSERT(info.gpr() != InvalidGPRReg);
1217             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1218         } else
1219             dataLogF("\n");
1220     }
1221     if (label)
1222         dataLogF("</%s>\n", label);
1223 }
1224
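// GPRTemporary, FPRTemporary and JSValueRegsTemporary are RAII-style handles on
// scratch registers. Where possible they reuse the register of an operand whose value
// is no longer needed (canReuse) instead of allocating a fresh one.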
1225 GPRTemporary::GPRTemporary()
1226     : m_jit(0)
1227     , m_gpr(InvalidGPRReg)
1228 {
1229 }
1230
1231 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1232     : m_jit(jit)
1233     , m_gpr(InvalidGPRReg)
1234 {
1235     m_gpr = m_jit->allocate();
1236 }
1237
1238 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1239     : m_jit(jit)
1240     , m_gpr(InvalidGPRReg)
1241 {
1242     m_gpr = m_jit->allocate(specific);
1243 }
1244
1245 #if USE(JSVALUE32_64)
1246 GPRTemporary::GPRTemporary(
1247     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1248     : m_jit(jit)
1249     , m_gpr(InvalidGPRReg)
1250 {
1251     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1252         m_gpr = m_jit->reuse(op1.gpr(which));
1253     else
1254         m_gpr = m_jit->allocate();
1255 }
1256 #endif // USE(JSVALUE32_64)
1257
1258 JSValueRegsTemporary::JSValueRegsTemporary() { }
1259
1260 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1261 #if USE(JSVALUE64)
1262     : m_gpr(jit)
1263 #else
1264     : m_payloadGPR(jit)
1265     , m_tagGPR(jit)
1266 #endif
1267 {
1268 }
1269
1270 #if USE(JSVALUE64)
1271 template<typename T>
1272 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1273     : m_gpr(jit, Reuse, operand)
1274 {
1275 }
1276 #else
1277 template<typename T>
1278 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1279 {
1280     if (resultWord == PayloadWord) {
1281         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1282         m_tagGPR = GPRTemporary(jit);
1283     } else {
1284         m_payloadGPR = GPRTemporary(jit);
1285         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1286     }
1287 }
1288 #endif
1289
1290 #if USE(JSVALUE64)
1291 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1292 {
1293     m_gpr = GPRTemporary(jit, Reuse, operand);
1294 }
1295 #else
1296 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1297 {
1298     if (jit->canReuse(operand.node())) {
1299         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1300         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1301     } else {
1302         m_payloadGPR = GPRTemporary(jit);
1303         m_tagGPR = GPRTemporary(jit);
1304     }
1305 }
1306 #endif
1307
1308 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1309
1310 JSValueRegs JSValueRegsTemporary::regs()
1311 {
1312 #if USE(JSVALUE64)
1313     return JSValueRegs(m_gpr.gpr());
1314 #else
1315     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1316 #endif
1317 }
1318
1319 void GPRTemporary::adopt(GPRTemporary& other)
1320 {
1321     ASSERT(!m_jit);
1322     ASSERT(m_gpr == InvalidGPRReg);
1323     ASSERT(other.m_jit);
1324     ASSERT(other.m_gpr != InvalidGPRReg);
1325     m_jit = other.m_jit;
1326     m_gpr = other.m_gpr;
1327     other.m_jit = 0;
1328     other.m_gpr = InvalidGPRReg;
1329 }
1330
1331 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1332 {
1333     ASSERT(other.m_jit);
1334     ASSERT(other.m_fpr != InvalidFPRReg);
1335     m_jit = other.m_jit;
1336     m_fpr = other.m_fpr;
1337
1338     other.m_jit = nullptr;
1339 }
1340
1341 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1342     : m_jit(jit)
1343     , m_fpr(InvalidFPRReg)
1344 {
1345     m_fpr = m_jit->fprAllocate();
1346 }
1347
1348 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1349     : m_jit(jit)
1350     , m_fpr(InvalidFPRReg)
1351 {
1352     if (m_jit->canReuse(op1.node()))
1353         m_fpr = m_jit->reuse(op1.fpr());
1354     else
1355         m_fpr = m_jit->fprAllocate();
1356 }
1357
1358 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1359     : m_jit(jit)
1360     , m_fpr(InvalidFPRReg)
1361 {
1362     if (m_jit->canReuse(op1.node()))
1363         m_fpr = m_jit->reuse(op1.fpr());
1364     else if (m_jit->canReuse(op2.node()))
1365         m_fpr = m_jit->reuse(op2.fpr());
1366     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1367         m_fpr = m_jit->reuse(op1.fpr());
1368     else
1369         m_fpr = m_jit->fprAllocate();
1370 }
1371
1372 #if USE(JSVALUE32_64)
1373 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1374     : m_jit(jit)
1375     , m_fpr(InvalidFPRReg)
1376 {
1377     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1378         m_fpr = m_jit->reuse(op1.fpr());
1379     else
1380         m_fpr = m_jit->fprAllocate();
1381 }
1382 #endif
1383
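// Code generation for peephole-fused compare+branch. Each helper inverts the condition
// and swaps the successors when the taken block is next in emission order, keeping the
// conditional branch pointed at the non-fall-through successor.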
1384 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1385 {
1386     BasicBlock* taken = branchNode->branchData()->taken.block;
1387     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1388
1389     if (taken == nextBlock()) {
1390         condition = MacroAssembler::invert(condition);
1391         std::swap(taken, notTaken);
1392     }
1393
1394     SpeculateDoubleOperand op1(this, node->child1());
1395     SpeculateDoubleOperand op2(this, node->child2());
1396     
1397     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1398     jump(notTaken);
1399 }
1400
1401 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1402 {
1403     BasicBlock* taken = branchNode->branchData()->taken.block;
1404     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1405
1406     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1407     
1408     if (taken == nextBlock()) {
1409         condition = MacroAssembler::NotEqual;
1410         BasicBlock* tmp = taken;
1411         taken = notTaken;
1412         notTaken = tmp;
1413     }
1414
1415     SpeculateCellOperand op1(this, node->child1());
1416     SpeculateCellOperand op2(this, node->child2());
1417     
1418     GPRReg op1GPR = op1.gpr();
1419     GPRReg op2GPR = op2.gpr();
1420     
1421     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1422         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1423             speculationCheck(
1424                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1425         }
1426         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1427             speculationCheck(
1428                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1429         }
1430     } else {
1431         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1432             speculationCheck(
1433                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1434                 m_jit.branchIfNotObject(op1GPR));
1435         }
1436         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1437             m_jit.branchTest8(
1438                 MacroAssembler::NonZero, 
1439                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1440                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1441
1442         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1443             speculationCheck(
1444                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1445                 m_jit.branchIfNotObject(op2GPR));
1446         }
1447         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1448             m_jit.branchTest8(
1449                 MacroAssembler::NonZero, 
1450                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1451                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1452     }
1453
1454     branchPtr(condition, op1GPR, op2GPR, taken);
1455     jump(notTaken);
1456 }
1457
1458 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1459 {
1460     BasicBlock* taken = branchNode->branchData()->taken.block;
1461     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1462
1463     // The branch instruction will branch to the taken block.
1464     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1465     if (taken == nextBlock()) {
1466         condition = JITCompiler::invert(condition);
1467         BasicBlock* tmp = taken;
1468         taken = notTaken;
1469         notTaken = tmp;
1470     }
1471
1472     if (node->child1()->isInt32Constant()) {
1473         int32_t imm = node->child1()->asInt32();
1474         SpeculateBooleanOperand op2(this, node->child2());
1475         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1476     } else if (node->child2()->isInt32Constant()) {
1477         SpeculateBooleanOperand op1(this, node->child1());
1478         int32_t imm = node->child2()->asInt32();
1479         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1480     } else {
1481         SpeculateBooleanOperand op1(this, node->child1());
1482         SpeculateBooleanOperand op2(this, node->child2());
1483         branch32(condition, op1.gpr(), op2.gpr(), taken);
1484     }
1485
1486     jump(notTaken);
1487 }
1488
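     // Fast path for ToLowerCase: walk an already-resolved (non-rope) 8-bit string and bail to
     // operationToLowerCase, passing the index reached so far, at the first non-ASCII or
     // uppercase ASCII character. If the scan completes without bailing, the original string is
     // returned unchanged.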
1489 void SpeculativeJIT::compileToLowerCase(Node* node)
1490 {
1491     ASSERT(node->op() == ToLowerCase);
1492     SpeculateCellOperand string(this, node->child1());
1493     GPRTemporary temp(this);
1494     GPRTemporary index(this);
1495     GPRTemporary charReg(this);
1496     GPRTemporary length(this);
1497
1498     GPRReg stringGPR = string.gpr();
1499     GPRReg tempGPR = temp.gpr();
1500     GPRReg indexGPR = index.gpr();
1501     GPRReg charGPR = charReg.gpr();
1502     GPRReg lengthGPR = length.gpr();
1503
1504     speculateString(node->child1(), stringGPR);
1505
1506     CCallHelpers::JumpList slowPath;
1507
1508     m_jit.move(TrustedImmPtr(0), indexGPR);
1509
1510     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1511     slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
1512
1513     slowPath.append(m_jit.branchTest32(
1514         MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1515         MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1516     m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1517     m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1518
1519     auto loopStart = m_jit.label();
1520     auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1521     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1522     slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1523     m_jit.sub32(TrustedImm32('A'), charGPR);
1524     slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1525
1526     m_jit.add32(TrustedImm32(1), indexGPR);
1527     m_jit.jump().linkTo(loopStart, &m_jit);
1528     
1529     slowPath.link(&m_jit);
1530     silentSpillAllRegisters(lengthGPR);
1531     callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1532     silentFillAllRegisters();
1533     m_jit.exceptionCheck();
1534     auto done = m_jit.jump();
1535
1536     loopDone.link(&m_jit);
1537     m_jit.move(stringGPR, lengthGPR);
1538
1539     done.link(&m_jit);
1540     cellResult(lengthGPR, node);
1541 }
1542
1543 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1544 {
1545     BasicBlock* taken = branchNode->branchData()->taken.block;
1546     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1547
1548     // The branch instruction will branch to the taken block.
1549     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1550     if (taken == nextBlock()) {
1551         condition = JITCompiler::invert(condition);
1552         BasicBlock* tmp = taken;
1553         taken = notTaken;
1554         notTaken = tmp;
1555     }
1556
1557     if (node->child1()->isInt32Constant()) {
1558         int32_t imm = node->child1()->asInt32();
1559         SpeculateInt32Operand op2(this, node->child2());
1560         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1561     } else if (node->child2()->isInt32Constant()) {
1562         SpeculateInt32Operand op1(this, node->child1());
1563         int32_t imm = node->child2()->asInt32();
1564         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1565     } else {
1566         SpeculateInt32Operand op1(this, node->child1());
1567         SpeculateInt32Operand op2(this, node->child2());
1568         branch32(condition, op1.gpr(), op2.gpr(), taken);
1569     }
1570
1571     jump(notTaken);
1572 }
1573
1574 // Returns true if the compare is fused with a subsequent branch.
1575 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1576 {
1577     // Fused compare & branch.
1578     unsigned branchIndexInBlock = detectPeepHoleBranch();
1579     if (branchIndexInBlock != UINT_MAX) {
1580         Node* branchNode = m_block->at(branchIndexInBlock);
1581
1582         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1583         // so there can be no intervening nodes that also reference the compare.
1584         ASSERT(node->adjustedRefCount() == 1);
1585
1586         if (node->isBinaryUseKind(Int32Use))
1587             compilePeepHoleInt32Branch(node, branchNode, condition);
1588 #if USE(JSVALUE64)
1589         else if (node->isBinaryUseKind(Int52RepUse))
1590             compilePeepHoleInt52Branch(node, branchNode, condition);
1591 #endif // USE(JSVALUE64)
1592         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1593             // Use non-peephole comparison, for now.
1594             return false;
1595         } else if (node->isBinaryUseKind(DoubleRepUse))
1596             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1597         else if (node->op() == CompareEq) {
1598             if (node->isBinaryUseKind(BooleanUse))
1599                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1600             else if (node->isBinaryUseKind(SymbolUse))
1601                 compilePeepHoleSymbolEquality(node, branchNode);
1602             else if (node->isBinaryUseKind(ObjectUse))
1603                 compilePeepHoleObjectEquality(node, branchNode);
1604             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1605                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1606             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1607                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1608             else if (!needsTypeCheck(node->child1(), SpecOther))
1609                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1610             else if (!needsTypeCheck(node->child2(), SpecOther))
1611                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1612             else {
1613                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1614                 return true;
1615             }
1616         } else {
1617             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1618             return true;
1619         }
1620
1621         use(node->child1());
1622         use(node->child2());
1623         m_indexInBlock = branchIndexInBlock;
1624         m_currentNode = branchNode;
1625         return true;
1626     }
1627     return false;
1628 }
1629
1630 void SpeculativeJIT::noticeOSRBirth(Node* node)
1631 {
1632     if (!node->hasVirtualRegister())
1633         return;
1634     
1635     VirtualRegister virtualRegister = node->virtualRegister();
1636     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1637     
1638     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1639 }
1640
1641 void SpeculativeJIT::compileMovHint(Node* node)
1642 {
1643     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1644     
1645     Node* child = node->child1().node();
1646     noticeOSRBirth(child);
1647     
1648     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1649 }
1650
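     // Called when code generation for the current block has gone wrong (m_compileOkay is
     // false). Emits an abort (breakpoint) in place of further code for this block; resetting
     // m_compileOkay appears to be so that compileCurrentBlock's entry assertion still holds
     // for any remaining blocks.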
1651 void SpeculativeJIT::bail(AbortReason reason)
1652 {
1653     if (verboseCompilationEnabled())
1654         dataLog("Bailing compilation.\n");
1655     m_compileOkay = true;
1656     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1657     clearGenerationInfo();
1658 }
1659
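     // Generates code for m_block: records the block-head label, replays the abstract
     // interpreter in lockstep with code generation, and emits the VariableEvent stream that
     // tells OSR exit where each live local lives. Compilation of the block is abandoned via
     // bail() if the abstract state becomes invalid or a node fails to compile.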
1660 void SpeculativeJIT::compileCurrentBlock()
1661 {
1662     ASSERT(m_compileOkay);
1663     
1664     if (!m_block)
1665         return;
1666     
1667     ASSERT(m_block->isReachable);
1668     
1669     m_jit.blockHeads()[m_block->index] = m_jit.label();
1670
1671     if (!m_block->intersectionOfCFAHasVisited) {
1672         // Don't generate code for basic blocks that are unreachable according to CFA.
1673         // But to be sure that nobody has generated a jump to this block, drop in a
1674         // breakpoint here.
1675         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1676         return;
1677     }
1678
1679     m_stream->appendAndLog(VariableEvent::reset());
1680     
1681     m_jit.jitAssertHasValidCallFrame();
1682     m_jit.jitAssertTagsInPlace();
1683     m_jit.jitAssertArgumentCountSane();
1684
1685     m_state.reset();
1686     m_state.beginBasicBlock(m_block);
1687     
1688     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1689         int operand = m_block->variablesAtHead.operandForIndex(i);
1690         Node* node = m_block->variablesAtHead[i];
1691         if (!node)
1692             continue; // No need to record dead SetLocals.
1693         
1694         VariableAccessData* variable = node->variableAccessData();
1695         DataFormat format;
1696         if (!node->refCount())
1697             continue; // No need to record dead SetLocals.
1698         format = dataFormatFor(variable->flushFormat());
1699         m_stream->appendAndLog(
1700             VariableEvent::setLocal(
1701                 VirtualRegister(operand),
1702                 variable->machineLocal(),
1703                 format));
1704     }
1705
1706     m_origin = NodeOrigin();
1707     
1708     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1709         m_currentNode = m_block->at(m_indexInBlock);
1710         
1711         // We may have hit a contradiction that the CFA was aware of but that the JIT
1712         // didn't cause directly.
1713         if (!m_state.isValid()) {
1714             bail(DFGBailedAtTopOfBlock);
1715             return;
1716         }
1717
1718         m_interpreter.startExecuting();
1719         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1720         m_jit.setForNode(m_currentNode);
1721         m_origin = m_currentNode->origin;
1722         if (validationEnabled())
1723             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1724         m_lastGeneratedNode = m_currentNode->op();
1725         
1726         ASSERT(m_currentNode->shouldGenerate());
1727         
1728         if (verboseCompilationEnabled()) {
1729             dataLogF(
1730                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1731                 (int)m_currentNode->index(),
1732                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1733             dataLog("\n");
1734         }
1735
1736         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1737             m_jit.jitReleaseAssertNoException(*m_jit.vm());
1738
1739         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1740
1741         compile(m_currentNode);
1742         
1743         if (belongsInMinifiedGraph(m_currentNode->op()))
1744             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1745         
1746 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1747         m_jit.clearRegisterAllocationOffsets();
1748 #endif
1749         
1750         if (!m_compileOkay) {
1751             bail(DFGBailedAtEndOfNode);
1752             return;
1753         }
1754         
1755         // Make sure that the abstract state is rematerialized for the next node.
1756         m_interpreter.executeEffects(m_indexInBlock);
1757     }
1758     
1759     // Perform the most basic verification that children have been used correctly.
1760     if (!ASSERT_DISABLED) {
1761         for (auto& info : m_generationInfo)
1762             RELEASE_ASSERT(!info.alive());
1763     }
1764 }
1765
1766 // If we are making type predictions about our arguments then
1767 // we need to check that they are correct on function entry.
1768 void SpeculativeJIT::checkArgumentTypes()
1769 {
1770     ASSERT(!m_currentNode);
1771     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1772
1773     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1774         Node* node = m_jit.graph().m_arguments[i];
1775         if (!node) {
1776             // The argument is dead. We don't do any checks for such arguments.
1777             continue;
1778         }
1779         
1780         ASSERT(node->op() == SetArgument);
1781         ASSERT(node->shouldGenerate());
1782
1783         VariableAccessData* variableAccessData = node->variableAccessData();
1784         FlushFormat format = variableAccessData->flushFormat();
1785         
1786         if (format == FlushedJSValue)
1787             continue;
1788         
1789         VirtualRegister virtualRegister = variableAccessData->local();
1790
1791         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1792         
1793 #if USE(JSVALUE64)
1794         switch (format) {
1795         case FlushedInt32: {
1796             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1797             break;
1798         }
1799         case FlushedBoolean: {
1800             GPRTemporary temp(this);
1801             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1802             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1803             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1804             break;
1805         }
1806         case FlushedCell: {
1807             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1808             break;
1809         }
1810         default:
1811             RELEASE_ASSERT_NOT_REACHED();
1812             break;
1813         }
1814 #else
1815         switch (format) {
1816         case FlushedInt32: {
1817             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1818             break;
1819         }
1820         case FlushedBoolean: {
1821             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1822             break;
1823         }
1824         case FlushedCell: {
1825             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1826             break;
1827         }
1828         default:
1829             RELEASE_ASSERT_NOT_REACHED();
1830             break;
1831         }
1832 #endif
1833     }
1834
1835     m_origin = NodeOrigin();
1836 }
1837
1838 bool SpeculativeJIT::compile()
1839 {
1840     checkArgumentTypes();
1841     
1842     ASSERT(!m_currentNode);
1843     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1844         m_jit.setForBlockIndex(blockIndex);
1845         m_block = m_jit.graph().block(blockIndex);
1846         compileCurrentBlock();
1847     }
1848     linkBranches();
1849     return true;
1850 }
1851
1852 void SpeculativeJIT::createOSREntries()
1853 {
1854     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1855         BasicBlock* block = m_jit.graph().block(blockIndex);
1856         if (!block)
1857             continue;
1858         if (!block->isOSRTarget)
1859             continue;
1860         
1861         // Currently we don't have OSR entry trampolines. We could add them
1862         // here if need be.
1863         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1864     }
1865 }
1866
1867 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1868 {
1869     unsigned osrEntryIndex = 0;
1870     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1871         BasicBlock* block = m_jit.graph().block(blockIndex);
1872         if (!block)
1873             continue;
1874         if (!block->isOSRTarget)
1875             continue;
1876         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1877     }
1878     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1879     
1880     if (verboseCompilationEnabled()) {
1881         DumpContext dumpContext;
1882         dataLog("OSR Entries:\n");
1883         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1884             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1885         if (!dumpContext.isEmpty())
1886             dumpContext.dump(WTF::dataFile());
1887     }
1888 }
1889     
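     // Polling-based VM traps check (used when Options::usePollingTraps() is on): test the
     // VM's "need trap handling" flag and call operationHandleTraps on the slow path.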
1890 void SpeculativeJIT::compileCheckTraps(Node*)
1891 {
1892     ASSERT(Options::usePollingTraps());
1893     GPRTemporary unused(this);
1894     GPRReg unusedGPR = unused.gpr();
1895
1896     JITCompiler::Jump needTrapHandling = m_jit.branchTest8(JITCompiler::NonZero,
1897         JITCompiler::AbsoluteAddress(m_jit.vm()->needTrapHandlingAddress()));
1898
1899     addSlowPathGenerator(slowPathCall(needTrapHandling, this, operationHandleTraps, unusedGPR));
1900 }
1901
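     // PutByVal into a double array. The incoming value is type-checked to be a real number
     // (NaN fails the check) and stored. Modes that may write past the public length grow the
     // length when the index is still within the vector; an index past the vector length either
     // OSR-exits or, for out-of-bounds modes, calls
     // operationPutDoubleByValBeyondArrayBounds{Strict,NonStrict}.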
1902 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1903 {
1904     Edge child3 = m_jit.graph().varArgChild(node, 2);
1905     Edge child4 = m_jit.graph().varArgChild(node, 3);
1906
1907     ArrayMode arrayMode = node->arrayMode();
1908     
1909     GPRReg baseReg = base.gpr();
1910     GPRReg propertyReg = property.gpr();
1911     
1912     SpeculateDoubleOperand value(this, child3);
1913
1914     FPRReg valueReg = value.fpr();
1915     
1916     DFG_TYPE_CHECK(
1917         JSValueRegs(), child3, SpecFullRealNumber,
1918         m_jit.branchDouble(
1919             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1920     
1921     if (!m_compileOkay)
1922         return;
1923     
1924     StorageOperand storage(this, child4);
1925     GPRReg storageReg = storage.gpr();
1926
1927     if (node->op() == PutByValAlias) {
1928         // Store the value to the array.
1929         GPRReg propertyReg = property.gpr();
1930         FPRReg valueReg = value.fpr();
1931         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1932         
1933         noResult(m_currentNode);
1934         return;
1935     }
1936     
1937     GPRTemporary temporary;
1938     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1939
1940     MacroAssembler::Jump slowCase;
1941     
1942     if (arrayMode.isInBounds()) {
1943         speculationCheck(
1944             OutOfBounds, JSValueRegs(), 0,
1945             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1946     } else {
1947         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1948         
1949         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1950         
1951         if (!arrayMode.isOutOfBounds())
1952             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1953         
1954         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1955         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1956         
1957         inBounds.link(&m_jit);
1958     }
1959     
1960     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1961
1962     base.use();
1963     property.use();
1964     value.use();
1965     storage.use();
1966     
1967     if (arrayMode.isOutOfBounds()) {
1968         addSlowPathGenerator(
1969             slowPathCall(
1970                 slowCase, this,
1971                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1972                 NoResult, baseReg, propertyReg, valueReg));
1973     }
1974
1975     noResult(m_currentNode, UseChildrenCalledExplicitly);
1976 }
1977
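     // GetCharCodeAt: bounds-check the index against the string's length, then load the 8-bit
     // or 16-bit character code from the string's backing storage.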
1978 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1979 {
1980     SpeculateCellOperand string(this, node->child1());
1981     SpeculateStrictInt32Operand index(this, node->child2());
1982     StorageOperand storage(this, node->child3());
1983
1984     GPRReg stringReg = string.gpr();
1985     GPRReg indexReg = index.gpr();
1986     GPRReg storageReg = storage.gpr();
1987     
1988     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1989
1990     // unsigned comparison so we can filter out negative indices and indices that are too large
1991     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1992
1993     GPRTemporary scratch(this);
1994     GPRReg scratchReg = scratch.gpr();
1995
1996     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1997
1998     // Load the character into scratchReg
1999     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2000
2001     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2002     JITCompiler::Jump cont8Bit = m_jit.jump();
2003
2004     is16Bit.link(&m_jit);
2005
2006     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2007
2008     cont8Bit.link(&m_jit);
2009
2010     int32Result(scratchReg, m_currentNode);
2011 }
2012
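     // GetByVal on a string. In-bounds accesses load the character and map it through the VM's
     // single-character string table (characters >= 0x100 take a slow call to
     // operationSingleCharacterString). For out-of-bounds modes, a sane String.prototype chain
     // (watched here) lets the slow path return undefined without a full lookup; otherwise the
     // slow path is a full operationGetByValStringInt call.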
2013 void SpeculativeJIT::compileGetByValOnString(Node* node)
2014 {
2015     SpeculateCellOperand base(this, node->child1());
2016     SpeculateStrictInt32Operand property(this, node->child2());
2017     StorageOperand storage(this, node->child3());
2018     GPRReg baseReg = base.gpr();
2019     GPRReg propertyReg = property.gpr();
2020     GPRReg storageReg = storage.gpr();
2021
2022     GPRTemporary scratch(this);
2023     GPRReg scratchReg = scratch.gpr();
2024 #if USE(JSVALUE32_64)
2025     GPRTemporary resultTag;
2026     GPRReg resultTagReg = InvalidGPRReg;
2027     if (node->arrayMode().isOutOfBounds()) {
2028         GPRTemporary realResultTag(this);
2029         resultTag.adopt(realResultTag);
2030         resultTagReg = resultTag.gpr();
2031     }
2032 #endif
2033
2034     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2035
2036     // unsigned comparison so we can filter out negative indices and indices that are too large
2037     JITCompiler::Jump outOfBounds = m_jit.branch32(
2038         MacroAssembler::AboveOrEqual, propertyReg,
2039         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2040     if (node->arrayMode().isInBounds())
2041         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2042
2043     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2044
2045     // Load the character into scratchReg
2046     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2047
2048     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2049     JITCompiler::Jump cont8Bit = m_jit.jump();
2050
2051     is16Bit.link(&m_jit);
2052
2053     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2054
2055     JITCompiler::Jump bigCharacter =
2056         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2057
2058     // 8 bit string values don't need the isASCII check.
2059     cont8Bit.link(&m_jit);
2060
2061     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2062     m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2063     m_jit.loadPtr(scratchReg, scratchReg);
2064
2065     addSlowPathGenerator(
2066         slowPathCall(
2067             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2068
2069     if (node->arrayMode().isOutOfBounds()) {
2070 #if USE(JSVALUE32_64)
2071         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2072 #endif
2073
2074         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2075         bool prototypeChainIsSane = false;
2076         if (globalObject->stringPrototypeChainIsSane()) {
2077             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2078             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2079             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2080             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2081             // indexed properties either.
2082             // https://bugs.webkit.org/show_bug.cgi?id=144668
2083             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2084             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2085             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2086         }
2087         if (prototypeChainIsSane) {
2088             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2089             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2090             
2091 #if USE(JSVALUE64)
2092             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2093                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2094 #else
2095             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2096                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2097                 baseReg, propertyReg));
2098 #endif
2099         } else {
2100 #if USE(JSVALUE64)
2101             addSlowPathGenerator(
2102                 slowPathCall(
2103                     outOfBounds, this, operationGetByValStringInt,
2104                     scratchReg, baseReg, propertyReg));
2105 #else
2106             addSlowPathGenerator(
2107                 slowPathCall(
2108                     outOfBounds, this, operationGetByValStringInt,
2109                     JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2110 #endif
2111         }
2112         
2113 #if USE(JSVALUE64)
2114         jsValueResult(scratchReg, m_currentNode);
2115 #else
2116         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2117 #endif
2118     } else
2119         cellResult(scratchReg, m_currentNode);
2120 }
2121
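     // String.fromCharCode. Untyped children go straight to operationStringFromCharCodeUntyped.
     // For int32 children, character codes below 0xff are looked up in the VM's
     // single-character string table; larger codes or missing table entries fall back to
     // operationStringFromCharCode.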
2122 void SpeculativeJIT::compileFromCharCode(Node* node)
2123 {
2124     Edge& child = node->child1();
2125     if (child.useKind() == UntypedUse) {
2126         JSValueOperand opr(this, child);
2127         JSValueRegs oprRegs = opr.jsValueRegs();
2128 #if USE(JSVALUE64)
2129         GPRTemporary result(this);
2130         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2131 #else
2132         GPRTemporary resultTag(this);
2133         GPRTemporary resultPayload(this);
2134         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2135 #endif
2136         flushRegisters();
2137         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2138         m_jit.exceptionCheck();
2139         
2140         jsValueResult(resultRegs, node);
2141         return;
2142     }
2143
2144     SpeculateStrictInt32Operand property(this, child);
2145     GPRReg propertyReg = property.gpr();
2146     GPRTemporary smallStrings(this);
2147     GPRTemporary scratch(this);
2148     GPRReg scratchReg = scratch.gpr();
2149     GPRReg smallStringsReg = smallStrings.gpr();
2150
2151     JITCompiler::JumpList slowCases;
2152     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2153     m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2154     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2155
2156     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2157     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2158     cellResult(scratchReg, m_currentNode);
2159 }
2160
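     // Classifies how the child of a ToInt32-style conversion is currently represented so that
     // compileValueToInt32 can pick the cheapest path: already an int32, a (possibly boxed)
     // JSValue that needs inspection, or a format (cell/boolean) that terminates speculation.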
2161 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2162 {
2163     VirtualRegister virtualRegister = node->virtualRegister();
2164     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2165
2166     switch (info.registerFormat()) {
2167     case DataFormatStorage:
2168         RELEASE_ASSERT_NOT_REACHED();
2169
2170     case DataFormatBoolean:
2171     case DataFormatCell:
2172         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2173         return GeneratedOperandTypeUnknown;
2174
2175     case DataFormatNone:
2176     case DataFormatJSCell:
2177     case DataFormatJS:
2178     case DataFormatJSBoolean:
2179     case DataFormatJSDouble:
2180         return GeneratedOperandJSValue;
2181
2182     case DataFormatJSInt32:
2183     case DataFormatInt32:
2184         return GeneratedOperandInteger;
2185
2186     default:
2187         RELEASE_ASSERT_NOT_REACHED();
2188         return GeneratedOperandTypeUnknown;
2189     }
2190 }
2191
2192 void SpeculativeJIT::compileValueToInt32(Node* node)
2193 {
2194     switch (node->child1().useKind()) {
2195 #if USE(JSVALUE64)
2196     case Int52RepUse: {
2197         SpeculateStrictInt52Operand op1(this, node->child1());
2198         GPRTemporary result(this, Reuse, op1);
2199         GPRReg op1GPR = op1.gpr();
2200         GPRReg resultGPR = result.gpr();
2201         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2202         int32Result(resultGPR, node, DataFormatInt32);
2203         return;
2204     }
2205 #endif // USE(JSVALUE64)
2206         
2207     case DoubleRepUse: {
2208         GPRTemporary result(this);
2209         SpeculateDoubleOperand op1(this, node->child1());
2210         FPRReg fpr = op1.fpr();
2211         GPRReg gpr = result.gpr();
2212         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2213         
2214         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this,
2215             hasSensibleDoubleToInt() ? operationToInt32SensibleSlow : operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2216         
2217         int32Result(gpr, node);
2218         return;
2219     }
2220     
2221     case NumberUse:
2222     case NotCellUse: {
2223         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2224         case GeneratedOperandInteger: {
2225             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2226             GPRTemporary result(this, Reuse, op1);
2227             m_jit.move(op1.gpr(), result.gpr());
2228             int32Result(result.gpr(), node, op1.format());
2229             return;
2230         }
2231         case GeneratedOperandJSValue: {
2232             GPRTemporary result(this);
2233 #if USE(JSVALUE64)
2234             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2235
2236             GPRReg gpr = op1.gpr();
2237             GPRReg resultGpr = result.gpr();
2238             FPRTemporary tempFpr(this);
2239             FPRReg fpr = tempFpr.fpr();
2240
2241             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2242             JITCompiler::JumpList converted;
2243
2244             if (node->child1().useKind() == NumberUse) {
2245                 DFG_TYPE_CHECK(
2246                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2247                     m_jit.branchTest64(
2248                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2249             } else {
2250                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2251                 
2252                 DFG_TYPE_CHECK(
2253                     JSValueRegs(gpr), node->child1(), ~SpecCellCheck, m_jit.branchIfCell(JSValueRegs(gpr)));
2254                 
2255                 // It's not a cell: so true turns into 1 and all else turns into 0.
2256                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2257                 converted.append(m_jit.jump());
2258                 
2259                 isNumber.link(&m_jit);
2260             }
2261
2262             // First, if we get here we have a double encoded as a JSValue
2263             unboxDouble(gpr, resultGpr, fpr);
2264
2265             silentSpillAllRegisters(resultGpr);
2266             callOperation(operationToInt32, resultGpr, fpr);
2267             silentFillAllRegisters();
2268
2269             converted.append(m_jit.jump());
2270
2271             isInteger.link(&m_jit);
2272             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2273
2274             converted.link(&m_jit);
2275 #else
2276             Node* childNode = node->child1().node();
2277             VirtualRegister virtualRegister = childNode->virtualRegister();
2278             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2279
2280             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2281
2282             GPRReg payloadGPR = op1.payloadGPR();
2283             GPRReg resultGpr = result.gpr();
2284         
2285             JITCompiler::JumpList converted;
2286
2287             if (info.registerFormat() == DataFormatJSInt32)
2288                 m_jit.move(payloadGPR, resultGpr);
2289             else {
2290                 GPRReg tagGPR = op1.tagGPR();
2291                 FPRTemporary tempFpr(this);
2292                 FPRReg fpr = tempFpr.fpr();
2293                 FPRTemporary scratch(this);
2294
2295                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2296
2297                 if (node->child1().useKind() == NumberUse) {
2298                     DFG_TYPE_CHECK(
2299                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2300                         m_jit.branch32(
2301                             MacroAssembler::AboveOrEqual, tagGPR,
2302                             TrustedImm32(JSValue::LowestTag)));
2303                 } else {
2304                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2305                     
2306                     DFG_TYPE_CHECK(
2307                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2308                         m_jit.branchIfCell(op1.jsValueRegs()));
2309                     
2310                     // It's not a cell: so true turns into 1 and all else turns into 0.
2311                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2312                     m_jit.move(TrustedImm32(0), resultGpr);
2313                     converted.append(m_jit.jump());
2314                     
2315                     isBoolean.link(&m_jit);
2316                     m_jit.move(payloadGPR, resultGpr);
2317                     converted.append(m_jit.jump());
2318                     
2319                     isNumber.link(&m_jit);
2320                 }
2321
2322                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2323
2324                 silentSpillAllRegisters(resultGpr);
2325                 callOperation(operationToInt32, resultGpr, fpr);
2326                 silentFillAllRegisters();
2327
2328                 converted.append(m_jit.jump());
2329
2330                 isInteger.link(&m_jit);
2331                 m_jit.move(payloadGPR, resultGpr);
2332
2333                 converted.link(&m_jit);
2334             }
2335 #endif
2336             int32Result(resultGpr, node);
2337             return;
2338         }
2339         case GeneratedOperandTypeUnknown:
2340             RELEASE_ASSERT(!m_compileOkay);
2341             return;
2342         }
2343         RELEASE_ASSERT_NOT_REACHED();
2344         return;
2345     }
2346     
2347     default:
2348         ASSERT(!m_compileOkay);
2349         return;
2350     }
2351 }
2352
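     // UInt32ToNumber: reinterpret an int32 value as unsigned. If overflow is tolerated, the
     // result becomes an Int52 when available, otherwise a double (2^32 is added back for
     // negative inputs). If overflow must be checked, a negative input OSR-exits and the value
     // stays an int32.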
2353 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2354 {
2355     if (doesOverflow(node->arithMode())) {
2356         if (enableInt52()) {
2357             SpeculateInt32Operand op1(this, node->child1());
2358             GPRTemporary result(this, Reuse, op1);
2359             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2360             strictInt52Result(result.gpr(), node);
2361             return;
2362         }
2363         SpeculateInt32Operand op1(this, node->child1());
2364         FPRTemporary result(this);
2365             
2366         GPRReg inputGPR = op1.gpr();
2367         FPRReg outputFPR = result.fpr();
2368             
2369         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2370             
2371         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2372         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2373         positive.link(&m_jit);
2374             
2375         doubleResult(outputFPR, node);
2376         return;
2377     }
2378     
2379     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2380
2381     SpeculateInt32Operand op1(this, node->child1());
2382     GPRTemporary result(this);
2383
2384     m_jit.move(op1.gpr(), result.gpr());
2385
2386     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2387
2388     int32Result(result.gpr(), node, op1.format());
2389 }
2390
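     // DoubleAsInt32: convert a double to an int32, OSR-exiting if the conversion is lossy and,
     // when the arith mode requires it, on negative zero.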
2391 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2392 {
2393     SpeculateDoubleOperand op1(this, node->child1());
2394     FPRTemporary scratch(this);
2395     GPRTemporary result(this);
2396     
2397     FPRReg valueFPR = op1.fpr();
2398     FPRReg scratchFPR = scratch.fpr();
2399     GPRReg resultGPR = result.gpr();
2400
2401     JITCompiler::JumpList failureCases;
2402     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2403     m_jit.branchConvertDoubleToInt32(
2404         valueFPR, resultGPR, failureCases, scratchFPR,
2405         shouldCheckNegativeZero(node->arithMode()));
2406     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2407
2408     int32Result(resultGPR, node);
2409 }
2410
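     // DoubleRep: produce an unboxed double from the child. For RealNumberUse, the value is
     // unboxed as a double; if the unboxed bits are NaN the input was not a boxed double, so it
     // is type-checked as an int32 and converted from the payload. For NumberUse/NotCellUse,
     // boxed ints and doubles are handled inline; NotCellUse additionally maps undefined to
     // NaN, null and false to 0, and true to 1.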
2411 void SpeculativeJIT::compileDoubleRep(Node* node)
2412 {
2413     switch (node->child1().useKind()) {
2414     case RealNumberUse: {
2415         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2416         FPRTemporary result(this);
2417         
2418         JSValueRegs op1Regs = op1.jsValueRegs();
2419         FPRReg resultFPR = result.fpr();
2420         
2421 #if USE(JSVALUE64)
2422         GPRTemporary temp(this);
2423         GPRReg tempGPR = temp.gpr();
2424         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2425 #else
2426         FPRTemporary temp(this);
2427         FPRReg tempFPR = temp.fpr();
2428         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2429 #endif
2430         
2431         JITCompiler::Jump done = m_jit.branchDouble(
2432             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2433         
2434         DFG_TYPE_CHECK(
2435             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2436         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2437         
2438         done.link(&m_jit);
2439         
2440         doubleResult(resultFPR, node);
2441         return;
2442     }
2443     
2444     case NotCellUse:
2445     case NumberUse: {
2446         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2447
2448         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2449         if (isInt32Speculation(possibleTypes)) {
2450             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2451             FPRTemporary result(this);
2452             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2453             doubleResult(result.fpr(), node);
2454             return;
2455         }
2456
2457         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2458         FPRTemporary result(this);
2459
2460 #if USE(JSVALUE64)
2461         GPRTemporary temp(this);
2462
2463         GPRReg op1GPR = op1.gpr();
2464         GPRReg tempGPR = temp.gpr();
2465         FPRReg resultFPR = result.fpr();
2466         JITCompiler::JumpList done;
2467
2468         JITCompiler::Jump isInteger = m_jit.branch64(
2469             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2470
2471         if (node->child1().useKind() == NotCellUse) {
2472             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2473             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2474
2475             static const double zero = 0;
2476             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2477
2478             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2479             done.append(isNull);
2480
2481             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCellCheck,
2482                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2483
2484             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2485             static const double one = 1;
2486             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2487             done.append(m_jit.jump());
2488             done.append(isFalse);
2489
2490             isUndefined.link(&m_jit);
2491             static const double NaN = PNaN;
2492             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2493             done.append(m_jit.jump());
2494
2495             isNumber.link(&m_jit);
2496         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2497             typeCheck(
2498                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2499                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2500         }
2501
2502         unboxDouble(op1GPR, tempGPR, resultFPR);
2503         done.append(m_jit.jump());
2504     
2505         isInteger.link(&m_jit);
2506         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2507         done.link(&m_jit);
2508 #else // USE(JSVALUE64) -> this is the 32_64 case
2509         FPRTemporary temp(this);
2510     
2511         GPRReg op1TagGPR = op1.tagGPR();
2512         GPRReg op1PayloadGPR = op1.payloadGPR();
2513         FPRReg tempFPR = temp.fpr();
2514         FPRReg resultFPR = result.fpr();
2515         JITCompiler::JumpList done;
2516     
2517         JITCompiler::Jump isInteger = m_jit.branch32(
2518             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2519
2520         if (node->child1().useKind() == NotCellUse) {
2521             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2522             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2523
2524             static const double zero = 0;
2525             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2526
2527             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2528             done.append(isNull);
2529
2530             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2531
2532             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2533             static const double one = 1;
2534             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2535             done.append(m_jit.jump());
2536             done.append(isFalse);
2537
2538             isUndefined.link(&m_jit);
2539             static const double NaN = PNaN;
2540             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2541             done.append(m_jit.jump());
2542
2543             isNumber.link(&m_jit);
2544         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2545             typeCheck(
2546                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2547                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2548         }
2549
2550         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2551         done.append(m_jit.jump());
2552     
2553         isInteger.link(&m_jit);
2554         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2555         done.link(&m_jit);
2556 #endif // USE(JSVALUE64)
2557     
2558         doubleResult(resultFPR, node);
2559         return;
2560     }
2561         
2562 #if USE(JSVALUE64)
2563     case Int52RepUse: {
2564         SpeculateStrictInt52Operand value(this, node->child1());
2565         FPRTemporary result(this);
2566         
2567         GPRReg valueGPR = value.gpr();
2568         FPRReg resultFPR = result.fpr();
2569
2570         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2571         
2572         doubleResult(resultFPR, node);
2573         return;
2574     }
2575 #endif // USE(JSVALUE64)
2576         
2577     default:
2578         RELEASE_ASSERT_NOT_REACHED();
2579         return;
2580     }
2581 }
2582
2583 void SpeculativeJIT::compileValueRep(Node* node)
2584 {
2585     switch (node->child1().useKind()) {
2586     case DoubleRepUse: {
2587         SpeculateDoubleOperand value(this, node->child1());
2588         JSValueRegsTemporary result(this);
2589         
2590         FPRReg valueFPR = value.fpr();
2591         JSValueRegs resultRegs = result.regs();
2592         
2593         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2594         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2595         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2596         // local was purified.
2597         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2598             m_jit.purifyNaN(valueFPR);
2599
2600         boxDouble(valueFPR, resultRegs);
2601         
2602         jsValueResult(resultRegs, node);
2603         return;
2604     }
2605         
2606 #if USE(JSVALUE64)
2607     case Int52RepUse: {
2608         SpeculateStrictInt52Operand value(this, node->child1());
2609         GPRTemporary result(this);
2610         
2611         GPRReg valueGPR = value.gpr();
2612         GPRReg resultGPR = result.gpr();
2613         
2614         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2615         
2616         jsValueResult(resultGPR, node);
2617         return;
2618     }
2619 #endif // USE(JSVALUE64)
2620         
2621     default:
2622         RELEASE_ASSERT_NOT_REACHED();
2623         return;
2624     }
2625 }
2626
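     // Helpers for clamped (Uint8ClampedArray-style) stores: integers are clamped to [0, 255];
     // doubles are rounded by adding 0.5 and truncating, with NaN and negative values clamping
     // to 0 and values above 255 clamping to 255.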
2627 static double clampDoubleToByte(double d)
2628 {
2629     d += 0.5;
2630     if (!(d > 0))
2631         d = 0;
2632     else if (d > 255)
2633         d = 255;
2634     return d;
2635 }
2636
2637 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2638 {
2639     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2640     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2641     jit.xorPtr(result, result);
2642     MacroAssembler::Jump clamped = jit.jump();
2643     tooBig.link(&jit);
2644     jit.move(JITCompiler::TrustedImm32(255), result);
2645     clamped.link(&jit);
2646     inBounds.link(&jit);
2647 }
2648
2649 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2650 {
2651     // Unordered compare so we pick up NaN
2652     static const double zero = 0;
2653     static const double byteMax = 255;
2654     static const double half = 0.5;
2655     jit.loadDouble(JITCompiler::TrustedImmPtr(&zero), scratch);
2656     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2657     jit.loadDouble(JITCompiler::TrustedImmPtr(&byteMax), scratch);
2658     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2659     
2660     jit.loadDouble(JITCompiler::TrustedImmPtr(&half), scratch);
2661     // FIXME: This should probably just use a floating point round!
2662     // https://bugs.webkit.org/show_bug.cgi?id=72054
2663     jit.addDouble(source, scratch);
2664     jit.truncateDoubleToInt32(scratch, result);   
2665     MacroAssembler::Jump truncatedInt = jit.jump();
2666     
2667     tooSmall.link(&jit);
2668     jit.xorPtr(result, result);
2669     MacroAssembler::Jump zeroed = jit.jump();
2670     
2671     tooBig.link(&jit);
2672     jit.move(JITCompiler::TrustedImm32(255), result);
2673     
2674     truncatedInt.link(&jit);
2675     zeroed.link(&jit);
2676
2677 }
2678
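     // Returns a jump that is taken when the typed-array index is out of bounds, or an unset
     // jump when no check is needed (PutByValAlias, or a constant index into a view whose
     // length the compiler has already proven sufficient).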
2679 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2680 {
2681     if (node->op() == PutByValAlias)
2682         return JITCompiler::Jump();
2683     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2684         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2685     if (view) {
2686         uint32_t length = view->length();
2687         Node* indexNode = m_jit.graph().child(node, 1).node();
2688         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2689             return JITCompiler::Jump();
2690         return m_jit.branch32(
2691             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2692     }
2693     return m_jit.branch32(
2694         MacroAssembler::AboveOrEqual, indexGPR,
2695         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2696 }
2697
2698 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2699 {
2700     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2701     if (!jump.isSet())
2702         return;
2703     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2704 }
2705
2706 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2707 {
2708     JITCompiler::Jump done;
2709     if (outOfBounds.isSet()) {
2710         done = m_jit.jump();
2711         if (node->arrayMode().isInBounds())
2712             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2713         else {
2714             outOfBounds.link(&m_jit);
2715
2716             JITCompiler::Jump notWasteful = m_jit.branch32(
2717                 MacroAssembler::NotEqual,
2718                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2719                 TrustedImm32(WastefulTypedArray));
2720
2721             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2722                 MacroAssembler::Zero,
2723                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2724             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2725             notWasteful.link(&m_jit);
2726         }
2727     }
2728     return done;
2729 }
2730
2731 void SpeculativeJIT::loadFromIntTypedArray(GPRReg storageReg, GPRReg propertyReg, GPRReg resultReg, TypedArrayType type)
2732 {
2733     switch (elementSize(type)) {
2734     case 1:
2735         if (isSigned(type))
2736             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2737         else
2738             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2739         break;
2740     case 2:
2741         if (isSigned(type))
2742             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2743         else
2744             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2745         break;
2746     case 4:
2747         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2748         break;
2749     default:
2750         CRASH();
2751     }
2752 }
2753
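     // Decides how to return a value loaded from an int typed array. Values that always fit in
     // an int32 are returned as int32. Uint32 loads either speculate a non-negative int32,
     // produce an Int52 (64-bit only), or are converted to a double with 2^32 added back for
     // results that have the high bit set.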
2754 void SpeculativeJIT::setIntTypedArrayLoadResult(Node* node, GPRReg resultReg, TypedArrayType type, bool canSpeculate)
2755 {
2756     if (elementSize(type) < 4 || isSigned(type)) {
2757         int32Result(resultReg, node);
2758         return;
2759     }
2760     
2761     ASSERT(elementSize(type) == 4 && !isSigned(type));
2762     if (node->shouldSpeculateInt32() && canSpeculate) {
2763         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2764         int32Result(resultReg, node);
2765         return;
2766     }
2767     
2768 #if USE(JSVALUE64)
2769     if (node->shouldSpeculateAnyInt()) {
2770         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2771         strictInt52Result(resultReg, node);
2772         return;
2773     }
2774 #endif
2775     
2776     FPRTemporary fresult(this);
2777     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2778     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2779     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2780     positive.link(&m_jit);
2781     doubleResult(fresult.fpr(), node);
2782 }
2783
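// GetByVal on an integer typed array: bounds-check the index, load the element, and box the
// result according to what the node is allowed to speculate.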
2784 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2785 {
2786     ASSERT(isInt(type));
2787     
2788     SpeculateCellOperand base(this, node->child1());
2789     SpeculateStrictInt32Operand property(this, node->child2());
2790     StorageOperand storage(this, node->child3());
2791
2792     GPRReg baseReg = base.gpr();
2793     GPRReg propertyReg = property.gpr();
2794     GPRReg storageReg = storage.gpr();
2795
2796     GPRTemporary result(this);
2797     GPRReg resultReg = result.gpr();
2798
2799     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2800
2801     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2802     loadFromIntTypedArray(storageReg, propertyReg, resultReg, type);
2803     bool canSpeculate = true;
2804     setIntTypedArrayLoadResult(node, resultReg, type, canSpeculate);
2805 }
2806
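// Materializes the value operand of an integer typed array store into a GPR, clamping to
// [0, 255] for clamped array types. Number constants are folded directly; Int32, Int52
// (64-bit only), and double values are converted in line. A double that is neither NaN nor
// exactly representable as an int32 is boxed and appended to slowPathCases so the generic
// put-by-val operation can handle it. Returns false if speculation was terminated (e.g. a
// non-number constant), in which case the caller should produce no result.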
2807 bool SpeculativeJIT::getIntTypedArrayStoreOperand(
2808     GPRTemporary& value,
2809     GPRReg property,
2810 #if USE(JSVALUE32_64)
2811     GPRTemporary& propertyTag,
2812     GPRTemporary& valueTag,
2813 #endif
2814     Edge valueUse, JITCompiler::JumpList& slowPathCases, bool isClamped)
2815 {
2816     bool isAppropriateConstant = false;
2817     if (valueUse->isConstant()) {
2818         JSValue jsValue = valueUse->asJSValue();
2819         SpeculatedType expectedType = typeFilterFor(valueUse.useKind());
2820         SpeculatedType actualType = speculationFromValue(jsValue);
2821         isAppropriateConstant = (expectedType | actualType) == expectedType;
2822     }
2823     
2824     if (isAppropriateConstant) {
2825         JSValue jsValue = valueUse->asJSValue();
2826         if (!jsValue.isNumber()) {
2827             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2828             return false;
2829         }
2830         double d = jsValue.asNumber();
2831         if (isClamped)
2832             d = clampDoubleToByte(d);
2833         GPRTemporary scratch(this);
2834         GPRReg scratchReg = scratch.gpr();
2835         m_jit.move(Imm32(toInt32(d)), scratchReg);
2836         value.adopt(scratch);
2837     } else {
2838         switch (valueUse.useKind()) {
2839         case Int32Use: {
2840             SpeculateInt32Operand valueOp(this, valueUse);
2841             GPRTemporary scratch(this);
2842             GPRReg scratchReg = scratch.gpr();
2843             m_jit.move(valueOp.gpr(), scratchReg);
2844             if (isClamped)
2845                 compileClampIntegerToByte(m_jit, scratchReg);
2846             value.adopt(scratch);
2847             break;
2848         }
2849             
2850 #if USE(JSVALUE64)
2851         case Int52RepUse: {
2852             SpeculateStrictInt52Operand valueOp(this, valueUse);
2853             GPRTemporary scratch(this);
2854             GPRReg scratchReg = scratch.gpr();
2855             m_jit.move(valueOp.gpr(), scratchReg);
2856             if (isClamped) {
2857                 MacroAssembler::Jump inBounds = m_jit.branch64(
2858                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2859                 MacroAssembler::Jump tooBig = m_jit.branch64(
2860                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2861                 m_jit.move(TrustedImm32(0), scratchReg);
2862                 MacroAssembler::Jump clamped = m_jit.jump();
2863                 tooBig.link(&m_jit);
2864                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2865                 clamped.link(&m_jit);
2866                 inBounds.link(&m_jit);
2867             }
2868             value.adopt(scratch);
2869             break;
2870         }
2871 #endif // USE(JSVALUE64)
2872             
2873         case DoubleRepUse: {
2874             RELEASE_ASSERT(!isAtomicsIntrinsic(m_currentNode->op()));
2875             if (isClamped) {
2876                 SpeculateDoubleOperand valueOp(this, valueUse);
2877                 GPRTemporary result(this);
2878                 FPRTemporary floatScratch(this);
2879                 FPRReg fpr = valueOp.fpr();
2880                 GPRReg gpr = result.gpr();
2881                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2882                 value.adopt(result);
2883             } else {
2884 #if USE(JSVALUE32_64)
2885                 GPRTemporary realPropertyTag(this);
2886                 propertyTag.adopt(realPropertyTag);
2887                 GPRReg propertyTagGPR = propertyTag.gpr();
2888
2889                 GPRTemporary realValueTag(this);
2890                 valueTag.adopt(realValueTag);
2891                 GPRReg valueTagGPR = valueTag.gpr();
2892 #endif
2893                 SpeculateDoubleOperand valueOp(this, valueUse);
2894                 GPRTemporary result(this);
2895                 FPRReg fpr = valueOp.fpr();
2896                 GPRReg gpr = result.gpr();
2897                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2898                 m_jit.xorPtr(gpr, gpr);
2899                 MacroAssembler::JumpList fixed(m_jit.jump());
2900                 notNaN.link(&m_jit);
2901
2902                 fixed.append(m_jit.branchTruncateDoubleToInt32(
2903                     fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
2904
2905 #if USE(JSVALUE64)
2906                 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
2907                 boxDouble(fpr, gpr);
2908 #else
2909                 UNUSED_PARAM(property);
2910                 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
2911                 boxDouble(fpr, valueTagGPR, gpr);
2912 #endif
2913                 slowPathCases.append(m_jit.jump());
2914
2915                 fixed.link(&m_jit);
2916                 value.adopt(result);
2917             }
2918             break;
2919         }
2920             
2921         default:
2922             RELEASE_ASSERT_NOT_REACHED();
2923             break;
2924         }
2925     }
2926     return true;
2927 }
2928
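// PutByVal on an integer typed array. The value is prepared by getIntTypedArrayStoreOperand,
// stored with an 8/16/32-bit store when the index is in bounds, and the shared
// out-of-bounds / neutered-buffer tail follows. Slow-path cases (doubles that do not
// truncate to int32) call the generic put-by-val operation, picking the direct/strict
// variant that matches the node.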
2929 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2930 {
2931     ASSERT(isInt(type));
2932     
2933     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2934     GPRReg storageReg = storage.gpr();
2935     
2936     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2937     
2938     GPRTemporary value;
2939 #if USE(JSVALUE32_64)
2940     GPRTemporary propertyTag;
2941     GPRTemporary valueTag;
2942 #endif
2943
2944     JITCompiler::JumpList slowPathCases;
2945     
2946     bool result = getIntTypedArrayStoreOperand(
2947         value, property,
2948 #if USE(JSVALUE32_64)
2949         propertyTag, valueTag,
2950 #endif
2951         valueUse, slowPathCases, isClamped(type));
2952     if (!result) {
2953         noResult(node);
2954         return;
2955     }
2956
2957     GPRReg valueGPR = value.gpr();
2958 #if USE(JSVALUE32_64)
2959     GPRReg propertyTagGPR = propertyTag.gpr();
2960     GPRReg valueTagGPR = valueTag.gpr();
2961 #endif
2962
2963     ASSERT_UNUSED(valueGPR, valueGPR != property);
2964     ASSERT(valueGPR != base);
2965     ASSERT(valueGPR != storageReg);
2966     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2967
2968     switch (elementSize(type)) {
2969     case 1:
2970         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2971         break;
2972     case 2:
2973         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2974         break;
2975     case 4:
2976         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2977         break;
2978     default:
2979         CRASH();
2980     }
2981
2982     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2983     if (done.isSet())
2984         done.link(&m_jit);
2985
2986     if (!slowPathCases.empty()) {
2987 #if USE(JSVALUE64)
2988         if (node->op() == PutByValDirect) {
2989             addSlowPathGenerator(slowPathCall(
2990                 slowPathCases, this,
2991                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
2992                 NoResult, base, property, valueGPR));
2993         } else {
2994             addSlowPathGenerator(slowPathCall(
2995                 slowPathCases, this,
2996                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
2997                 NoResult, base, property, valueGPR));
2998         }
2999 #else // not USE(JSVALUE64)
3000         if (node->op() == PutByValDirect) {
3001             addSlowPathGenerator(slowPathCall(
3002                 slowPathCases, this,
3003                 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
3004                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3005         } else {
3006             addSlowPathGenerator(slowPathCall(
3007                 slowPathCases, this,
3008                 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
3009                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3010         }
3011 #endif
3012     }
3013     
3014     noResult(node);
3015 }
3016
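// GetByVal on a Float32/Float64 array: bounds-check, load the element (widening float to
// double for 4-byte elements), and produce a double result.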
3017 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3018 {
3019     ASSERT(isFloat(type));
3020     
3021     SpeculateCellOperand base(this, node->child1());
3022     SpeculateStrictInt32Operand property(this, node->child2());
3023     StorageOperand storage(this, node->child3());
3024
3025     GPRReg baseReg = base.gpr();
3026     GPRReg propertyReg = property.gpr();
3027     GPRReg storageReg = storage.gpr();
3028
3029     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
3030
3031     FPRTemporary result(this);
3032     FPRReg resultReg = result.fpr();
3033     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3034     switch (elementSize(type)) {
3035     case 4:
3036         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3037         m_jit.convertFloatToDouble(resultReg, resultReg);
3038         break;
3039     case 8: {
3040         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3041         break;
3042     }
3043     default:
3044         RELEASE_ASSERT_NOT_REACHED();
3045     }
3046     
3047     doubleResult(resultReg, node);
3048 }
3049
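// PutByVal on a Float32/Float64 array. The value is already a double; for 4-byte elements it
// is narrowed to float in a scratch FPR before the store. Out-of-bounds and neutered-buffer
// handling is shared with the integer path.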
3050 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3051 {
3052     ASSERT(isFloat(type));
3053     
3054     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3055     GPRReg storageReg = storage.gpr();
3056     
3057     Edge baseUse = m_jit.graph().varArgChild(node, 0);
3058     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3059
3060     SpeculateDoubleOperand valueOp(this, valueUse);
3061     FPRTemporary scratch(this);
3062     FPRReg valueFPR = valueOp.fpr();
3063     FPRReg scratchFPR = scratch.fpr();
3064
3065     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3066     
3067     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3068     
3069     switch (elementSize(type)) {
3070     case 4: {
3071         m_jit.moveDouble(valueFPR, scratchFPR);
3072         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3073         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3074         break;
3075     }
3076     case 8:
3077         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3078         break;
3079     default:
3080         RELEASE_ASSERT_NOT_REACHED();
3081     }
3082
3083     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3084     if (done.isSet())
3085         done.link(&m_jit);
3086     noResult(node);
3087 }
3088
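// Implements the default instanceof behavior for a cell value: speculate that the prototype
// is an object, then walk the value's prototype chain comparing each link against
// prototypeReg. Proxy objects bail out to operationDefaultHasInstance. The boolean result is
// left in scratchReg (fully boxed on 64-bit, a 0/1 payload on 32-bit).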
3089 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
3090 {
3091     // Check that prototype is an object.
3092     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3093     
3094     // Initialize scratchReg with the value being checked.
3095     m_jit.move(valueReg, scratchReg);
3096     
3097     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3098     MacroAssembler::Label loop(&m_jit);
3099     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3100         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3101     m_jit.emitLoadStructure(*m_jit.vm(), scratchReg, scratchReg, scratch2Reg);
3102     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
3103     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3104 #if USE(JSVALUE64)
3105     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3106 #else
3107     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3108 #endif
3109     
3110     // No match - result is false.
3111 #if USE(JSVALUE64)
3112     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
3113 #else
3114     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
3115 #endif
3116     MacroAssembler::JumpList doneJumps; 
3117     doneJumps.append(m_jit.jump());
3118
3119     performDefaultHasInstance.link(&m_jit);
3120     silentSpillAllRegisters(scratchReg);
3121     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
3122     silentFillAllRegisters();
3123     m_jit.exceptionCheck();
3124 #if USE(JSVALUE64)
3125     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
3126 #endif
3127     doneJumps.append(m_jit.jump());
3128     
3129     isInstance.link(&m_jit);
3130 #if USE(JSVALUE64)
3131     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
3132 #else
3133     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
3134 #endif
3135     
3136     doneJumps.link(&m_jit);
3137 }
3138
3139 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
3140 {
3141     SpeculateCellOperand base(this, node->child1());
3142
3143     GPRReg baseGPR = base.gpr();
3144
3145     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
3146
3147     noResult(node);
3148 }
3149
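// ParseInt always calls out to C++. The operation is chosen by whether a radix child is
// present and whether the input is speculated to be a string or left untyped.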
3150 void SpeculativeJIT::compileParseInt(Node* node)
3151 {
3152     RELEASE_ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == StringUse);
3153
3154     GPRFlushedCallResult resultPayload(this);
3155     GPRReg resultPayloadGPR = resultPayload.gpr();
3156 #if USE(JSVALUE64)
3157     JSValueRegs resultRegs(resultPayloadGPR);
3158 #else
3159     GPRFlushedCallResult2 resultTag(this);
3160     GPRReg resultTagGPR = resultTag.gpr();
3161     JSValueRegs resultRegs(resultTagGPR, resultPayloadGPR);
3162 #endif
3163
3164     if (node->child2()) {
3165         SpeculateInt32Operand radix(this, node->child2());
3166         GPRReg radixGPR = radix.gpr();
3167         if (node->child1().useKind() == UntypedUse) {
3168             JSValueOperand value(this, node->child1());
3169 #if USE(JSVALUE64)
3170             auto result = resultRegs.gpr();
3171             auto valueReg = value.gpr();
3172 #else
3173             auto result = resultRegs;
3174             auto valueReg = value.jsValueRegs();
3175 #endif
3176
3177             flushRegisters();
3178             callOperation(operationParseIntGeneric, result, valueReg, radixGPR);
3179             m_jit.exceptionCheck();
3180         } else {
3181             SpeculateCellOperand value(this, node->child1());
3182             GPRReg valueGPR = value.gpr();
3183             speculateString(node->child1(), valueGPR);
3184
3185 #if USE(JSVALUE64)
3186             auto result = resultRegs.gpr();
3187 #else
3188             auto result = resultRegs;
3189 #endif
3190
3191             flushRegisters();
3192             callOperation(operationParseIntString, result, valueGPR, radixGPR);
3193             m_jit.exceptionCheck();
3194         }
3195     } else {
3196         if (node->child1().useKind() == UntypedUse) {
3197             JSValueOperand value(this, node->child1());
3198 #if USE(JSVALUE64)
3199             auto result = resultRegs.gpr();
3200 #else
3201             auto result = resultRegs;
3202 #endif
3203             JSValueRegs valueRegs = value.jsValueRegs();
3204
3205             flushRegisters();
3206             callOperation(operationParseIntNoRadixGeneric, result, valueRegs);
3207             m_jit.exceptionCheck();
3208         } else {
3209             SpeculateCellOperand value(this, node->child1());
3210             GPRReg valueGPR = value.gpr();
3211             speculateString(node->child1(), valueGPR);
3212
3213             flushRegisters();
3214             callOperation(operationParseIntStringNoRadix, resultRegs, valueGPR);
3215             m_jit.exceptionCheck();
3216         }
3217     }
3218
3219     jsValueResult(resultRegs, node);
3220 }
3221
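// InstanceOf. With an untyped left operand we first check for a cell (a non-cell is never an
// instance, so the result is false); cells then share compileInstanceOfForObject with the
// CellUse case.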
3222 void SpeculativeJIT::compileInstanceOf(Node* node)
3223 {
3224     if (node->child1().useKind() == UntypedUse) {
3225         // It might not be a cell. Speculate less aggressively.
3226         // Or: it might only be used once (i.e. by us), so we get zero benefit
3227         // from speculating any more aggressively than we absolutely need to.
3228         
3229         JSValueOperand value(this, node->child1());
3230         SpeculateCellOperand prototype(this, node->child2());
3231         GPRTemporary scratch(this);
3232         GPRTemporary scratch2(this);
3233         
3234         GPRReg prototypeReg = prototype.gpr();
3235         GPRReg scratchReg = scratch.gpr();
3236         GPRReg scratch2Reg = scratch2.gpr();
3237         
3238         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
3239         GPRReg valueReg = value.jsValueRegs().payloadGPR();
3240         moveFalseTo(scratchReg);
3241
3242         MacroAssembler::Jump done = m_jit.jump();
3243         
3244         isCell.link(&m_jit);
3245         
3246         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3247         
3248         done.link(&m_jit);
3249
3250         blessedBooleanResult(scratchReg, node);
3251         return;
3252     }
3253     
3254     SpeculateCellOperand value(this, node->child1());
3255     SpeculateCellOperand prototype(this, node->child2());
3256     
3257     GPRTemporary scratch(this);
3258     GPRTemporary scratch2(this);
3259     
3260     GPRReg valueReg = value.gpr();
3261     GPRReg prototypeReg = prototype.gpr();
3262     GPRReg scratchReg = scratch.gpr();
3263     GPRReg scratch2Reg = scratch2.gpr();
3264     
3265     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3266
3267     blessedBooleanResult(scratchReg, node);
3268 }
3269
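// Shared implementation of BitAnd/BitOr/BitXor when an operand is UntypedUse. If either
// operand is known not to be a number we go straight to the C++ operation. Otherwise a
// snippet generator emits the int32 fast path inline; the slow path rematerializes any
// constant operand into registers and calls the operation.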
3270 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3271 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3272 {
3273     Edge& leftChild = node->child1();
3274     Edge& rightChild = node->child2();
3275
3276     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3277         JSValueOperand left(this, leftChild);
3278         JSValueOperand right(this, rightChild);
3279         JSValueRegs leftRegs = left.jsValueRegs();
3280         JSValueRegs rightRegs = right.jsValueRegs();
3281 #if USE(JSVALUE64)
3282         GPRTemporary result(this);
3283         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3284 #else
3285         GPRTemporary resultTag(this);
3286         GPRTemporary resultPayload(this);
3287         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3288 #endif
3289         flushRegisters();
3290         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3291         m_jit.exceptionCheck();
3292
3293         jsValueResult(resultRegs, node);
3294         return;
3295     }
3296
3297     std::optional<JSValueOperand> left;
3298     std::optional<JSValueOperand> right;
3299
3300     JSValueRegs leftRegs;
3301     JSValueRegs rightRegs;
3302
3303 #if USE(JSVALUE64)
3304     GPRTemporary result(this);
3305     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3306     GPRTemporary scratch(this);
3307     GPRReg scratchGPR = scratch.gpr();
3308 #else
3309     GPRTemporary resultTag(this);
3310     GPRTemporary resultPayload(this);
3311     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3312     GPRReg scratchGPR = resultTag.gpr();
3313 #endif
3314
3315     SnippetOperand leftOperand;
3316     SnippetOperand rightOperand;
3317
3318     // The snippet generator does not support both operands being constant. If the left
3319     // operand is already const, we'll ignore the right operand's constness.
3320     if (leftChild->isInt32Constant())
3321         leftOperand.setConstInt32(leftChild->asInt32());
3322     else if (rightChild->isInt32Constant())
3323         rightOperand.setConstInt32(rightChild->asInt32());
3324
3325     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3326
3327     if (!leftOperand.isConst()) {
3328         left.emplace(this, leftChild);
3329         leftRegs = left->jsValueRegs();
3330     }
3331     if (!rightOperand.isConst()) {
3332         right.emplace(this, rightChild);
3333         rightRegs = right->jsValueRegs();
3334     }
3335
3336     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3337     gen.generateFastPath(m_jit);
3338
3339     ASSERT(gen.didEmitFastPath());
3340     gen.endJumpList().append(m_jit.jump());
3341
3342     gen.slowPathJumpList().link(&m_jit);
3343     silentSpillAllRegisters(resultRegs);
3344
3345     if (leftOperand.isConst()) {
3346         leftRegs = resultRegs;
3347         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3348     } else if (rightOperand.isConst()) {
3349         rightRegs = resultRegs;
3350         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3351     }
3352
3353     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3354
3355     silentFillAllRegisters();
3356     m_jit.exceptionCheck();
3357
3358     gen.endJumpList().link(&m_jit);
3359     jsValueResult(resultRegs, node);
3360 }
3361
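// Bitwise &, |, ^. Untyped operands take the snippet-based path above; otherwise both
// operands are speculated int32, with small special cases when either side is an int32
// constant.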
3362 void SpeculativeJIT::compileBitwiseOp(Node* node)
3363 {
3364     NodeType op = node->op();
3365     Edge& leftChild = node->child1();
3366     Edge& rightChild = node->child2();
3367
3368     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3369         switch (op) {
3370         case BitAnd:
3371             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3372             return;
3373         case BitOr:
3374             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3375             return;
3376         case BitXor:
3377             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3378             return;
3379         default:
3380             RELEASE_ASSERT_NOT_REACHED();
3381         }
3382     }
3383
3384     if (leftChild->isInt32Constant()) {
3385         SpeculateInt32Operand op2(this, rightChild);
3386         GPRTemporary result(this, Reuse, op2);
3387
3388         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3389
3390         int32Result(result.gpr(), node);
3391
3392     } else if (rightChild->isInt32Constant()) {
3393         SpeculateInt32Operand op1(this, leftChild);
3394         GPRTemporary result(this, Reuse, op1);
3395
3396         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3397
3398         int32Result(result.gpr(), node);
3399
3400     } else {
3401         SpeculateInt32Operand op1(this, leftChild);
3402         SpeculateInt32Operand op2(this, rightChild);
3403         GPRTemporary result(this, Reuse, op1, op2);
3404         
3405         GPRReg reg1 = op1.gpr();
3406         GPRReg reg2 = op2.gpr();
3407         bitOp(op, reg1, reg2, result.gpr());
3408         
3409         int32Result(result.gpr(), node);
3410     }
3411 }
3412
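// Untyped >> and >>>. Same structure as emitUntypedBitOp, but the snippet and the slow-path
// operation are picked from the node's op: a signed shift for BitRShift, an unsigned shift
// for BitURShift. The generator is also handed an FPR, presumably for handling a double left
// operand in line.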
3413 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3414 {
3415     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3416         ? operationValueBitRShift : operationValueBitURShift;
3417     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3418         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3419
3420     Edge& leftChild = node->child1();
3421     Edge& rightChild = node->child2();
3422
3423     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3424         JSValueOperand left(this, leftChild);
3425         JSValueOperand right(this, rightChild);
3426         JSValueRegs leftRegs = left.jsValueRegs();
3427         JSValueRegs rightRegs = right.jsValueRegs();
3428 #if USE(JSVALUE64)
3429         GPRTemporary result(this);
3430         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3431 #else
3432         GPRTemporary resultTag(this);
3433         GPRTemporary resultPayload(this);
3434         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3435 #endif
3436         flushRegisters();
3437         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3438         m_jit.exceptionCheck();
3439
3440         jsValueResult(resultRegs, node);
3441         return;
3442     }
3443
3444     std::optional<JSValueOperand> left;
3445     std::optional<JSValueOperand> right;
3446
3447     JSValueRegs leftRegs;
3448     JSValueRegs rightRegs;
3449
3450     FPRTemporary leftNumber(this);
3451     FPRReg leftFPR = leftNumber.fpr();
3452
3453 #if USE(JSVALUE64)
3454     GPRTemporary result(this);
3455     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3456     GPRTemporary scratch(this);
3457     GPRReg scratchGPR = scratch.gpr();
3458     FPRReg scratchFPR = InvalidFPRReg;
3459 #else
3460     GPRTemporary resultTag(this);
3461     GPRTemporary resultPayload(this);
3462     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3463     GPRReg scratchGPR = resultTag.gpr();
3464     FPRTemporary fprScratch(this);
3465     FPRReg scratchFPR = fprScratch.fpr();
3466 #endif
3467
3468     SnippetOperand leftOperand;
3469     SnippetOperand rightOperand;
3470
3471     // The snippet generator does not support both operands being constant. If the left
3472     // operand is already const, we'll ignore the right operand's constness.
3473     if (leftChild->isInt32Constant())
3474         leftOperand.setConstInt32(leftChild->asInt32());
3475     else if (rightChild->isInt32Constant())
3476         rightOperand.setConstInt32(rightChild->asInt32());
3477
3478     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3479
3480     if (!leftOperand.isConst()) {
3481         left.emplace(this, leftChild);
3482         leftRegs = left->jsValueRegs();
3483     }
3484     if (!rightOperand.isConst()) {
3485         right.emplace(this, rightChild);
3486         rightRegs = right->jsValueRegs();
3487     }
3488
3489     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3490         leftFPR, scratchGPR, scratchFPR, shiftType);
3491     gen.generateFastPath(m_jit);
3492
3493     ASSERT(gen.didEmitFastPath());
3494     gen.endJumpList().append(m_jit.jump());
3495
3496     gen.slowPathJumpList().link(&m_jit);
3497     silentSpillAllRegisters(resultRegs);
3498
3499     if (leftOperand.isConst()) {
3500         leftRegs = resultRegs;
3501         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3502     } else if (rightOperand.isConst()) {
3503         rightRegs = resultRegs;
3504         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3505     }
3506
3507     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3508
3509     silentFillAllRegisters();
3510     m_jit.exceptionCheck();
3511
3512     gen.endJumpList().link(&m_jit);
3513     jsValueResult(resultRegs, node);
3514     return;
3515 }
3516
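// Shift operators. Untyped operands use the snippet paths above; otherwise both operands are
// speculated int32. A constant shift amount is masked to 0..31 at compile time, matching the
// specified behavior of the shift operators.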
3517 void SpeculativeJIT::compileShiftOp(Node* node)
3518 {
3519     NodeType op = node->op();
3520     Edge& leftChild = node->child1();
3521     Edge& rightChild = node->child2();
3522
3523     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3524         switch (op) {
3525         case BitLShift:
3526             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3527             return;
3528         case BitRShift:
3529         case BitURShift:
3530             emitUntypedRightShiftBitOp(node);
3531             return;
3532         default:
3533             RELEASE_ASSERT_NOT_REACHED();
3534         }
3535     }
3536
3537     if (rightChild->isInt32Constant()) {
3538         SpeculateInt32Operand op1(this, leftChild);
3539         GPRTemporary result(this, Reuse, op1);
3540
3541         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3542
3543         int32Result(result.gpr(), node);
3544     } else {
3545         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3546         SpeculateInt32Operand op1(this, leftChild);
3547         SpeculateInt32Operand op2(this, rightChild);
3548         GPRTemporary result(this, Reuse, op1);
3549
3550         GPRReg reg1 = op1.gpr();
3551         GPRReg reg2 = op2.gpr();
3552         shiftOp(op, reg1, reg2, result.gpr());
3553
3554         int32Result(result.gpr(), node);
3555     }
3556 }
3557
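// ValueAdd on untyped operands. If either side is known not to be a number we call
// operationValueAddNotNumber directly. Otherwise we allocate a JITAddIC seeded with the
// baseline ArithProfile and let compileMathIC emit an inline fast path with a repatchable
// slow path.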
3558 void SpeculativeJIT::compileValueAdd(Node* node)
3559 {
3560     Edge& leftChild = node->child1();
3561     Edge& rightChild = node->child2();
3562
3563     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3564         JSValueOperand left(this, leftChild);
3565         JSValueOperand right(this, rightChild);
3566         JSValueRegs leftRegs = left.jsValueRegs();
3567         JSValueRegs rightRegs = right.jsValueRegs();
3568 #if USE(JSVALUE64)
3569         GPRTemporary result(this);
3570         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3571 #else
3572         GPRTemporary resultTag(this);
3573         GPRTemporary resultPayload(this);
3574         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3575 #endif
3576         flushRegisters();
3577         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3578         m_jit.exceptionCheck();
3579     
3580         jsValueResult(resultRegs, node);
3581         return;
3582     }
3583
3584 #if USE(JSVALUE64)
3585     bool needsScratchGPRReg = true;
3586     bool needsScratchFPRReg = false;
3587 #else
3588     bool needsScratchGPRReg = true;
3589     bool needsScratchFPRReg = true;
3590 #endif
3591
3592     ArithProfile* arithProfile = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
3593     JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile);
3594     auto repatchingFunction = operationValueAddOptimize;
3595     auto nonRepatchingFunction = operationValueAdd;
3596     
3597     compileMathIC(node, addIC, needsScratchGPRReg, needsScratchFPRReg, repatchingFunction, nonRepatchingFunction);
3598 }
3599
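// Driver for binary math ICs. It asks the IC to emit its inline fast path; when that
// succeeds, a slow-path generator spills live registers, rematerializes any constant
// operand, calls either the repatching or the plain operation depending on the generation
// state, and finalizes the inline code in a link task. If the IC declines to generate inline
// code, we simply flush and call the plain operation.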
3600 template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
3601 void SpeculativeJIT::compileMathIC(Node* node, JITBinaryMathIC<Generator>* mathIC, bool needsScratchGPRReg, bool needsScratchFPRReg, RepatchingFunction repatchingFunction, NonRepatchingFunction nonRepatchingFunction)
3602 {
3603     Edge& leftChild = node->child1();
3604     Edge& rightChild = node->child2();
3605
3606     std::optional<JSValueOperand> left;
3607     std::optional<JSValueOperand> right;
3608
3609     JSValueRegs leftRegs;
3610     JSValueRegs rightRegs;
3611
3612     FPRTemporary leftNumber(this);
3613     FPRTemporary rightNumber(this);
3614     FPRReg leftFPR = leftNumber.fpr();
3615     FPRReg rightFPR = rightNumber.fpr();
3616
3617     GPRReg scratchGPR = InvalidGPRReg;
3618     FPRReg scratchFPR = InvalidFPRReg;
3619
3620     std::optional<FPRTemporary> fprScratch;
3621     if (needsScratchFPRReg) {
3622         fprScratch.emplace(this);
3623         scratchFPR = fprScratch->fpr();
3624     }
3625
3626 #if USE(JSVALUE64)
3627     std::optional<GPRTemporary> gprScratch;
3628     if (needsScratchGPRReg) {
3629         gprScratch.emplace(this);
3630         scratchGPR = gprScratch->gpr();
3631     }
3632     GPRTemporary result(this);
3633     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3634 #else
3635     GPRTemporary resultTag(this);
3636     GPRTemporary resultPayload(this);
3637     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3638     if (needsScratchGPRReg)
3639         scratchGPR = resultRegs.tagGPR();
3640 #endif
3641
3642     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3643     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3644
3645     // The snippet generator does not support both operands being constant. If the left
3646     // operand is already const, we'll ignore the right operand's constness.
3647     if (leftChild->isInt32Constant())
3648         leftOperand.setConstInt32(leftChild->asInt32());
3649     else if (rightChild->isInt32Constant())
3650         rightOperand.setConstInt32(rightChild->asInt32());
3651
3652     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3653     ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));
3654
3655     if (!Generator::isLeftOperandValidConstant(leftOperand)) {
3656         left.emplace(this, leftChild);
3657         leftRegs = left->jsValueRegs();
3658     }
3659     if (!Generator::isRightOperandValidConstant(rightOperand)) {
3660         right.emplace(this, rightChild);
3661         rightRegs = right->jsValueRegs();
3662     }
3663
3664 #if ENABLE(MATH_IC_STATS)
3665     auto inlineStart = m_jit.label();
3666 #endif
3667
3668     Box<MathICGenerationState> addICGenerationState = Box<MathICGenerationState>::create();
3669     mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, leftFPR, rightFPR, scratchGPR, scratchFPR);
3670
3671     bool shouldEmitProfiling = false;
3672     bool generatedInline = mathIC->generateInline(m_jit, *addICGenerationState, shouldEmitProfiling);
3673
3674     if (generatedInline) {
3675         ASSERT(!addICGenerationState->slowPathJumps.empty());
3676
3677         Vector<SilentRegisterSavePlan> savePlans;
3678         silentSpillAllRegistersImpl(false, savePlans, resultRegs);
3679
3680         auto done = m_jit.label();
3681
3682         addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
3683             addICGenerationState->slowPathJumps.link(&m_jit);
3684             addICGenerationState->slowPathStart = m_jit.label();
3685 #if ENABLE(MATH_IC_STATS)
3686             auto slowPathStart = m_jit.label();
3687 #endif
3688
3689             silentSpill(savePlans);
3690
3691             auto innerLeftRegs = leftRegs;
3692             auto innerRightRegs = rightRegs;
3693             if (Generator::isLeftOperandValidConstant(leftOperand)) {
3694                 innerLeftRegs = resultRegs;
3695                 m_jit.moveValue(leftChild->asJSValue(), innerLeftRegs);
3696             } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3697                 innerRightRegs = resultRegs;
3698                 m_jit.moveValue(rightChild->asJSValue(), innerRightRegs);
3699             }
3700
3701             if (addICGenerationState->shouldSlowPathRepatch)
3702                 addICGenerationState->slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJMic>(repatchingFunction), resultRegs, innerLeftRegs, innerRightRegs, TrustedImmPtr(mathIC));
3703             else
3704                 addICGenerationState->slowPathCall = callOperation(nonRepatchingFunction, resultRegs, innerLeftRegs, innerRightRegs);
3705
3706             silentFill(savePlans);
3707             m_jit.exceptionCheck();
3708             m_jit.jump().linkTo(done, &m_jit);
3709
3710             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3711                 mathIC->finalizeInlineCode(*addICGenerationState, linkBuffer);
3712             });
3713
3714 #if ENABLE(MATH_IC_STATS)
3715             auto slowPathEnd = m_jit.label();
3716             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3717                 size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
3718                 mathIC->m_generatedCodeSize += size;
3719             });
3720 #endif
3721
3722         });
3723     } else {
3724         if (Generator::isLeftOperandValidConstant(leftOperand)) {
3725             left.emplace(this, leftChild);
3726             leftRegs = left->jsValueRegs();
3727         } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3728             right.emplace(this, rightChild);
3729             rightRegs = right->jsValueRegs();
3730         }
3731
3732         flushRegisters();
3733         callOperation(nonRepatchingFunction, resultRegs, leftRegs, rightRegs);
3734         m_jit.exceptionCheck();
3735     }
3736
3737 #if ENABLE(MATH_IC_STATS)
3738     auto inlineEnd = m_jit.label();
3739     m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3740         size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
3741         mathIC->m_generatedCodeSize += size;
3742     });
3743 #endif
3744
3745     jsValueResult(resultRegs, node);
3746     return;
3747 }
3748
3749 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3750 {
3751     // We could do something smarter here, but this case is currently super rare and, unless
3752     // Symbol.hasInstance becomes popular, it will likely remain that way.
3753
3754     JSValueOperand value(this, node->child1());
3755     SpeculateCellOperand constructor(this, node->child2());
3756     JSValueOperand hasInstanceValue(this, node->child3());
3757     GPRTemporary result(this);
3758
3759     JSValueRegs valueRegs = value.jsValueRegs();
3760     GPRReg constructorGPR = constructor.gpr();
3761     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3762     GPRReg resultGPR = result.gpr();
3763
3764     MacroAssembler::Jump slowCase = m_jit.jump();
3765
3766     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3767
3768     unblessedBooleanResult(resultGPR, node);
3769 }
3770
3771 void SpeculativeJIT::compileIsCellWithType(Node* node)
3772 {
3773     switch (node->child1().useKind()) {
3774     case UntypedUse: {
3775         JSValueOperand value(this, node->child1());
3776 #if USE(JSVALUE64)
3777         GPRTemporary result(this, Reuse, value);
3778 #else
3779         GPRTemporary result(this, Reuse, value, PayloadWord);
3780 #endif
3781
3782         JSValueRegs valueRegs = value.jsValueRegs();
3783         GPRReg resultGPR = result.gpr();
3784
3785         JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3786
3787         m_jit.compare8(JITCompiler::Equal,
3788             JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3789             TrustedImm32(node->queriedType()),
3790             resultGPR);
3791         blessBoolean(resultGPR);
3792         JITCompiler::Jump done = m_jit.jump();
3793
3794         isNotCell.link(&m_jit);
3795         moveFalseTo(resultGPR);
3796
3797         done.link(&m_jit);
3798         blessedBooleanResult(resultGPR, node);
3799         return;
3800     }
3801
3802     case CellUse: {
3803         SpeculateCellOperand cell(this, node->child1());
3804         GPRTemporary result(this, Reuse, cell);
3805
3806         GPRReg cellGPR = cell.gpr();
3807         GPRReg resultGPR = result.gpr();
3808
3809         m_jit.compare8(JITCompiler::Equal,
3810             JITCompiler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
3811             TrustedImm32(node->queriedType()),
3812             resultGPR);
3813         blessBoolean(resultGPR);
3814         blessedBooleanResult(resultGPR, node);
3815         return;
3816     }
3817
3818     default:
3819         RELEASE_ASSERT_NOT_REACHED();
3820         break;
3821     }
3822 }
3823
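// IsTypedArrayView: true for a cell whose JSType falls in the contiguous range from
// Int8ArrayType to Float64ArrayType, tested with a subtract and an unsigned compare.
// Non-cells produce false.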
3824 void SpeculativeJIT::compileIsTypedArrayView(Node* node)
3825 {
3826     JSValueOperand value(this, node->child1());
3827 #if USE(JSVALUE64)
3828     GPRTemporary result(this, Reuse, value);
3829 #else
3830     GPRTemporary result(this, Reuse, value, PayloadWord);
3831 #endif
3832
3833     JSValueRegs valueRegs = value.jsValueRegs();
3834     GPRReg resultGPR = result.gpr();
3835
3836     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3837
3838     m_jit.load8(JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()), resultGPR);
3839     m_jit.sub32(TrustedImm32(Int8ArrayType), resultGPR);
3840     m_jit.compare32(JITCompiler::BelowOrEqual,
3841         resultGPR,
3842         TrustedImm32(Float64ArrayType - Int8ArrayType),
3843         resultGPR);
3844     blessBoolean(resultGPR);
3845     JITCompiler::Jump done = m_jit.jump();
3846
3847     isNotCell.link(&m_jit);
3848     moveFalseTo(resultGPR);
3849
3850     done.link(&m_jit);
3851     blessedBooleanResult(resultGPR, node);
3852 }
3853
3854 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3855 {
3856     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3857     JSValueOperand value(this, node->child1());
3858 #if USE(JSVALUE64)
3859     GPRTemporary result(this, Reuse, value);
3860 #else
3861     GPRTemporary result(this, Reuse, value, PayloadWord);
3862 #endif
3863
3864     JSValueRegs valueRegs = value.jsValueRegs();
3865     GPRReg resultGPR = result.gpr();
3866
3867     MacroAssembler::JumpList slowCases;
3868     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3869     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3870     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3871
3872     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3873     cellResult(resultGPR, node);
3874 }
3875
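// ArithAdd with typed operands. For Int32Use with a constant addend we either add without
// checks (when the arith mode does not require an overflow check) or use a branching add
// that OSR-exits on overflow.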
3876 void SpeculativeJIT::compileArithAdd(Node* node)
3877 {
3878     switch (node->binaryUseKind()) {
3879     case Int32Use: {
3880         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3881
3882         if (node->child2()->isInt32Constant()) {
3883             SpeculateInt32Operand op1(this, node->child1());
3884             GPRTemporary result(this, Reuse, op1);
3885
3886             GPRReg gpr1 = op1.gpr();
3887             int32_t imm2 = node->child2()->asInt32();
3888             GPRReg gprResult = result.gpr();
3889
3890             if (!shouldCheckOverflow(node->arithMode())) {
3891                 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3892                 int32Result(gprResult, node);
3893                 return;
3894             }
3895
3896             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);