/*
 * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "BinarySwitch.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGArrayifySlowPathGenerator.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
#include "DFGCapabilities.h"
#include "DFGDOMJITPatchpointParams.h"
#include "DFGMayExit.h"
#include "DFGOSRExitFuzz.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "DOMJITPatchpoint.h"
#include "DirectArguments.h"
#include "JITAddGenerator.h"
#include "JITBitAndGenerator.h"
#include "JITBitOrGenerator.h"
#include "JITBitXorGenerator.h"
#include "JITDivGenerator.h"
#include "JITLeftShiftGenerator.h"
#include "JITMulGenerator.h"
#include "JITRightShiftGenerator.h"
#include "JITSubGenerator.h"
#include "JSAsyncFunction.h"
#include "JSCInlines.h"
#include "JSEnvironmentRecord.h"
#include "JSFixedArray.h"
#include "JSGeneratorFunction.h"
#include "JSLexicalEnvironment.h"
#include "LinkBuffer.h"
#include "RegExpConstructor.h"
#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include "WriteBarrierBuffer.h"
#include <wtf/BitVector.h>
#include <wtf/Box.h>
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
{
}

SpeculativeJIT::~SpeculativeJIT()
{
}

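// Inline-allocates a JSFinalObject and, when the structure has indexed
// properties, a butterfly with room for vectorLength elements. On allocator
// failure we fall back to operationNewRawObject; slots beyond numElements are
// pre-filled (PNaN for double arrays, the empty JSValue otherwise) so the
// result is always safe for the collector to scan.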
void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
{
    IndexingType indexingType = structure->indexingType();
    bool hasIndexingHeader = hasIndexedProperties(indexingType);

    unsigned inlineCapacity = structure->inlineCapacity();
    unsigned outOfLineCapacity = structure->outOfLineCapacity();

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    ASSERT(vectorLength >= numElements);
    vectorLength = Butterfly::optimalContiguousVectorLength(structure, vectorLength);

    JITCompiler::JumpList slowCases;

    size_t size = 0;
    if (hasIndexingHeader)
        size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
    size += outOfLineCapacity * sizeof(JSValue);

    m_jit.move(TrustedImmPtr(0), storageGPR);

    if (size) {
        if (MarkedAllocator* allocator = m_jit.vm()->heap.allocatorForAuxiliaryData(size)) {
            m_jit.move(TrustedImmPtr(allocator), scratchGPR);
            m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);

            m_jit.addPtr(
                TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
                storageGPR);

            if (hasIndexingHeader)
                m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
        } else
            slowCases.append(m_jit.jump());
    }

    size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
    MarkedAllocator* allocatorPtr = m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
    if (allocatorPtr) {
        m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
        emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
        m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
    } else
        slowCases.append(m_jit.jump());

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
        slowCases, this, operationNewRawObject, resultGPR, storageGPR,
        structure, vectorLength));

    if (numElements < vectorLength) {
#if USE(JSVALUE64)
        if (hasDouble(structure->indexingType()))
            m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        else
            m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        if (hasDouble(structure->indexingType()))
            value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        else
            value.asInt64 = JSValue::encode(JSValue());
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    if (hasIndexingHeader)
        m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));

    m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());

    m_jit.mutatorFence();
}

void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
{
    if (inlineCallFrame && !inlineCallFrame->isVarargs())
        m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
    else {
        VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
        m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
        if (!includeThis)
            m_jit.sub32(TrustedImm32(1), lengthGPR);
    }
}

void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
{
    emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
}

void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
{
    if (origin.inlineCallFrame) {
        if (origin.inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
                calleeGPR);
        } else {
            m_jit.move(
                TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
                calleeGPR);
        }
    } else
        m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
}

void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
{
    m_jit.addPtr(
        TrustedImm32(
            JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
        GPRInfo::callFrameRegister, startGPR);
}

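// When OSR exit fuzzing is enabled, this bumps the global check counter and
// returns a jump that the caller folds into its next speculation check so the
// chosen check exits unconditionally. Returns an unset Jump when fuzzing does
// not apply here.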
MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
{
    if (!Options::useOSRExitFuzz()
        || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
        || !doOSRExitFuzzing())
        return MacroAssembler::Jump();

    MacroAssembler::Jump result;

    m_jit.pushToSave(GPRInfo::regT0);
    m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
    m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
    m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
    unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
    unsigned at = Options::fireOSRExitFuzzAt();
    if (at || atOrAfter) {
        unsigned threshold;
        MacroAssembler::RelationalCondition condition;
        if (atOrAfter) {
            threshold = atOrAfter;
            condition = MacroAssembler::Below;
        } else {
            threshold = at;
            condition = MacroAssembler::NotEqual;
        }
        MacroAssembler::Jump ok = m_jit.branch32(
            condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
        m_jit.popToRestore(GPRInfo::regT0);
        result = m_jit.jump();
        ok.link(&m_jit);
    }
    m_jit.popToRestore(GPRInfo::regT0);

    return result;
}

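// The speculationCheck() family registers an OSR exit for the current node:
// the given jump(s) are wired to an exit that reconstructs bytecode state from
// the variable event stream as recorded at this point in code generation.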
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList jumpsToFail;
        jumpsToFail.append(fuzzJump);
        jumpsToFail.append(jumpToFail);
        m_jit.appendExitInfo(jumpsToFail);
    } else
        m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList myJumpsToFail;
        myJumpsToFail.append(jumpsToFail);
        myJumpsToFail.append(fuzzJump);
        m_jit.appendExitInfo(myJumpsToFail);
    } else
        m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    return speculationCheck(kind, jsValueSource, nodeUse.node());
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}

void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}

void SpeculativeJIT::unreachable(Node* node)
{
    m_compileOkay = false;
    m_jit.abortWithReason(DFGUnreachableNode, node->op());
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}

void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(exitKind, source, edge.node(), jumpToFail);
}

RegisterSet SpeculativeJIT::usedRegisters()
{
    RegisterSet result;

    for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
        GPRReg gpr = GPRInfo::toRegister(i);
        if (m_gprs.isInUse(gpr))
            result.set(gpr);
    }
    for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
        FPRReg fpr = FPRInfo::toRegister(i);
        if (m_fprs.isInUse(fpr))
            result.set(fpr);
    }

    result.merge(RegisterSet::stubUnavailableRegisters());

    return result;
}

void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(WTFMove(slowPathGenerator));
}

void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
{
    m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
}

void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
{
    for (auto& slowPathGenerator : m_slowPathGenerators) {
        pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
        slowPathGenerator->generate(this);
    }
    for (auto& slowPathLambda : m_slowPathLambdas) {
        Node* currentNode = slowPathLambda.currentNode;
        m_currentNode = currentNode;
        m_outOfLineStreamIndex = slowPathLambda.streamIndex;
        pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
        slowPathLambda.generator();
        m_outOfLineStreamIndex = std::nullopt;
    }
}

void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned i = 0; i < m_generationInfo.size(); ++i)
        m_generationInfo[i] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}

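// The silentSavePlanFor*() helpers compute how to spill the value currently
// live in the given register and how to refill it afterwards, so that a call
// can be made without disturbing register state ("silently"). Constants are
// rematerialized on fill rather than being stored to the stack.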
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }

    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }

#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

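// Expects tempGPR to hold the cell's indexingTypeAndMisc byte. Returns the
// jump(s) taken when the indexing type does not match what the ArrayMode
// demands.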
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        CRASH();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
#endif
    }

    case Array::Array:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));

    case Array::NonArray:
    case Array::OriginalNonArray:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));

    case Array::PossiblyArray:
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }

    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}

JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;

    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::Undecided:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }

    return result;
}

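// Emits the checks required by the node's ArrayMode, OSR-exiting on a
// mismatch. No conversion is performed here; nodes that need conversion go
// through arrayify() below.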
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }

    const ClassInfo* expectedClassInfo = 0;

    switch (node->arrayMode().type()) {
    case Array::AnyTypedArray:
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::Undecided:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));

        noResult(m_currentNode);
        return;
    }
    case Array::DirectArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
        noResult(m_currentNode);
        return;
    case Array::ScopedArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
        noResult(m_currentNode);
        return;
    default:
        speculateCellTypeWithoutTypeFiltering(
            node->child1(), baseReg,
            typeForTypedArrayType(node->arrayMode().typedArrayType()));
        noResult(m_currentNode);
        return;
    }

    RELEASE_ASSERT(expectedClassInfo);

    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));

    noResult(m_currentNode);
}

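// Converts the base object's indexing type to the one the ArrayMode asks for,
// jumping to an ArrayifySlowPathGenerator when the fast check fails. For
// ArrayifyToStructure the fast check is a weak structure compare rather than
// an indexing-type test.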
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;

    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);

        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }

    addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));

    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());

    SpeculateCellOperand base(this, node->child1());

    if (!node->child2()) {
        arrayify(node, base.gpr(), InvalidGPRReg);
        return;
    }

    SpeculateInt32Operand property(this, node->child2());

    arrayify(node, base.gpr(), property.gpr());
}

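// Materializes a storage (butterfly) pointer for the edge into a GPR,
// reloading it from the stack if it was spilled. Values that are not in
// storage format are filled as cells and the cell pointer is returned.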
GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }

        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }

    case DataFormatStorage: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    default:
        return fillSpeculateCell(edge);
    }
}

void SpeculativeJIT::useChildren(Node* node)
{
    if (node->flags() & NodeHasVarArgs) {
        for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
            if (!!m_jit.graph().m_varArgChildren[childIdx])
                use(m_jit.graph().m_varArgChildren[childIdx]);
        }
    } else {
        Edge child1 = node->child1();
        if (!child1) {
            ASSERT(!node->child2() && !node->child3());
            return;
        }
        use(child1);

        Edge child2 = node->child2();
        if (!child2) {
            ASSERT(!node->child3());
            return;
        }
        use(child2);

        Edge child3 = node->child3();
        if (!child3)
            return;
        use(child3);
    }
}

void SpeculativeJIT::compileTryGetById(Node* node)
{
    switch (node->child1().useKind()) {
    case CellUse: {
        SpeculateCellOperand base(this, node->child1());
        JSValueRegsTemporary result(this, Reuse, base);

        JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
        JSValueRegs resultRegs = result.regs();

        base.use();

        cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::GetPure);

        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        break;
    }

    case UntypedUse: {
        JSValueOperand base(this, node->child1());
        JSValueRegsTemporary result(this, Reuse, base);

        JSValueRegs baseRegs = base.jsValueRegs();
        JSValueRegs resultRegs = result.regs();

        base.use();

        JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);

        cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::GetPure);

        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        break;
    }

    default:
        DFG_CRASH(m_jit.graph(), node, "Bad use kind");
        break;
    }
}

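// Compiles the 'in' operator. When the property name is a constant atomic
// string we emit a patchable inline cache backed by operationInOptimize;
// otherwise we fall back to the generic operationGenericIn call.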
void SpeculativeJIT::compileIn(Node* node)
{
    SpeculateCellOperand base(this, node->child2());
    GPRReg baseGPR = base.gpr();

    if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
        if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);

            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child1());

            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();

            // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
            // we can cast it to const AtomicStringImpl* safely.
            auto slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));

            stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
#if USE(JSVALUE32_64)
            stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
            stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
#endif
            stubInfo->patch.usedRegisters = usedRegisters();

            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(WTFMove(slowPath));

            base.use();

            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }

    JSValueOperand key(this, node->child1());
    JSValueRegs regs = key.jsValueRegs();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    m_jit.exceptionCheck();
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

void SpeculativeJIT::compileDeleteById(Node* node)
{
    JSValueOperand value(this, node->child1());
    GPRFlushedCallResult result(this);

    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg resultGPR = result.gpr();

    value.use();

    flushRegisters();
    callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
    m_jit.exceptionCheck();

    unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

void SpeculativeJIT::compileDeleteByVal(Node* node)
{
    JSValueOperand base(this, node->child1());
    JSValueOperand key(this, node->child2());
    GPRFlushedCallResult result(this);

    JSValueRegs baseRegs = base.jsValueRegs();
    JSValueRegs keyRegs = key.jsValueRegs();
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
    m_jit.exceptionCheck();

    unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

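// These helpers try to fuse the comparison with an immediately following
// Branch node (a "peephole" branch). They return true when the branch was
// consumed, so the caller can skip past it.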
bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);

    return false;
}

bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeStrictEq(node, branchNode, invert);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeStrictEq(node, invert);

    return false;
}

static const char* dataFormatString(DataFormat format)
{
    // These values correspond to the DataFormat enum.
    const char* strings[] = {
        "[  ]",
        "[ i]",
        "[ d]",
        "[ c]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
        "[J ]",
        "[Ji]",
        "[Jd]",
        "[Jc]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
    };
    return strings[format];
}

void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}

GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate(specific);
}

#if USE(JSVALUE32_64)
GPRTemporary::GPRTemporary(
    SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr(which));
    else
        m_gpr = m_jit->allocate();
}
#endif // USE(JSVALUE32_64)

JSValueRegsTemporary::JSValueRegsTemporary() { }

JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}

#if USE(JSVALUE64)
template<typename T>
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
    : m_gpr(jit, Reuse, operand)
{
}
#else
template<typename T>
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
{
    if (resultWord == PayloadWord) {
        m_payloadGPR = GPRTemporary(jit, Reuse, operand);
        m_tagGPR = GPRTemporary(jit);
    } else {
        m_payloadGPR = GPRTemporary(jit);
        m_tagGPR = GPRTemporary(jit, Reuse, operand);
    }
}
#endif

#if USE(JSVALUE64)
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
{
    m_gpr = GPRTemporary(jit, Reuse, operand);
}
#else
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
{
    if (jit->canReuse(operand.node())) {
        m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
        m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
    } else {
        m_payloadGPR = GPRTemporary(jit);
        m_tagGPR = GPRTemporary(jit);
    }
}
#endif

JSValueRegsTemporary::~JSValueRegsTemporary() { }

JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}

void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}

FPRTemporary::FPRTemporary(FPRTemporary&& other)
{
    ASSERT(other.m_jit);
    ASSERT(other.m_fpr != InvalidFPRReg);
    m_jit = other.m_jit;
    m_fpr = other.m_fpr;

    other.m_jit = nullptr;
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else if (m_jit->canReuse(op2.node()))
        m_fpr = m_jit->reuse(op2.fpr());
    else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}

#if USE(JSVALUE32_64)
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (op1.isDouble() && m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
#endif

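// The compilePeepHole* helpers emit a fused compare-and-branch for the common
// case where a comparison node's only user is the Branch that immediately
// follows it in the block.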
void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    if (taken == nextBlock()) {
        condition = MacroAssembler::invert(condition);
        std::swap(taken, notTaken);
    }

    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());

    branchDouble(condition, op1.fpr(), op2.fpr(), taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
        }
        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
        }
    } else {
        if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
                m_jit.branchIfNotObject(op1GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
            speculationCheck(
                BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
                m_jit.branchIfNotObject(op2GPR));
        }
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}

void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (node->child1()->isInt32Constant()) {
        int32_t imm = node->child1()->asInt32();
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (node->child2()->isInt32Constant()) {
        SpeculateBooleanOperand op1(this, node->child1());
        int32_t imm = node->child2()->asInt32();
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        SpeculateBooleanOperand op1(this, node->child1());
        SpeculateBooleanOperand op2(this, node->child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}

1498 void SpeculativeJIT::compileToLowerCase(Node* node)
1499 {
1500     ASSERT(node->op() == ToLowerCase);
1501     SpeculateCellOperand string(this, node->child1());
1502     GPRTemporary temp(this);
1503     GPRTemporary index(this);
1504     GPRTemporary charReg(this);
1505     GPRTemporary length(this);
1506
1507     GPRReg stringGPR = string.gpr();
1508     GPRReg tempGPR = temp.gpr();
1509     GPRReg indexGPR = index.gpr();
1510     GPRReg charGPR = charReg.gpr();
1511     GPRReg lengthGPR = length.gpr();
1512
1513     speculateString(node->child1(), stringGPR);
1514
1515     CCallHelpers::JumpList slowPath;
1516
1517     m_jit.move(TrustedImmPtr(0), indexGPR);
1518
1519     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1520     slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
1521
1522     slowPath.append(m_jit.branchTest32(
1523         MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1524         MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1525     m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1526     m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1527
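         // Fast path: scan the 8-bit string one character at a time. Bail to the slow path at the
         // first non-ASCII character or ASCII uppercase letter, with indexGPR recording how far the
         // scan got. If the scan reaches the end, the string is already lowercase and is returned
         // unchanged.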
1528     auto loopStart = m_jit.label();
1529     auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1530     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1531     slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1532     m_jit.sub32(TrustedImm32('A'), charGPR);
1533     slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1534
1535     m_jit.add32(TrustedImm32(1), indexGPR);
1536     m_jit.jump().linkTo(loopStart, &m_jit);
1537     
1538     slowPath.link(&m_jit);
1539     silentSpillAllRegisters(lengthGPR);
1540     callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1541     silentFillAllRegisters(lengthGPR);
1542     m_jit.exceptionCheck();
1543     auto done = m_jit.jump();
1544
1545     loopDone.link(&m_jit);
1546     m_jit.move(stringGPR, lengthGPR);
1547
1548     done.link(&m_jit);
1549     cellResult(lengthGPR, node);
1550 }
1551
1552 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1553 {
1554     BasicBlock* taken = branchNode->branchData()->taken.block;
1555     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1556
1557     // The branch instruction will branch to the taken block.
1558     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1559     if (taken == nextBlock()) {
1560         condition = JITCompiler::invert(condition);
1561         BasicBlock* tmp = taken;
1562         taken = notTaken;
1563         notTaken = tmp;
1564     }
1565
1566     if (node->child1()->isInt32Constant()) {
1567         int32_t imm = node->child1()->asInt32();
1568         SpeculateInt32Operand op2(this, node->child2());
1569         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1570     } else if (node->child2()->isInt32Constant()) {
1571         SpeculateInt32Operand op1(this, node->child1());
1572         int32_t imm = node->child2()->asInt32();
1573         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1574     } else {
1575         SpeculateInt32Operand op1(this, node->child1());
1576         SpeculateInt32Operand op2(this, node->child2());
1577         branch32(condition, op1.gpr(), op2.gpr(), taken);
1578     }
1579
1580     jump(notTaken);
1581 }
1582
1583 // Returns true if the compare is fused with a subsequent branch.
1584 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1585 {
1586     // Fused compare & branch.
1587     unsigned branchIndexInBlock = detectPeepHoleBranch();
1588     if (branchIndexInBlock != UINT_MAX) {
1589         Node* branchNode = m_block->at(branchIndexInBlock);
1590
1591         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1592         // so there can be no intervening nodes to also reference the compare.
1593         ASSERT(node->adjustedRefCount() == 1);
1594
1595         if (node->isBinaryUseKind(Int32Use))
1596             compilePeepHoleInt32Branch(node, branchNode, condition);
1597 #if USE(JSVALUE64)
1598         else if (node->isBinaryUseKind(Int52RepUse))
1599             compilePeepHoleInt52Branch(node, branchNode, condition);
1600 #endif // USE(JSVALUE64)
1601         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1602             // Use non-peephole comparison, for now.
1603             return false;
1604         } else if (node->isBinaryUseKind(DoubleRepUse))
1605             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1606         else if (node->op() == CompareEq) {
1607             if (node->isBinaryUseKind(BooleanUse))
1608                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1609             else if (node->isBinaryUseKind(SymbolUse))
1610                 compilePeepHoleSymbolEquality(node, branchNode);
1611             else if (node->isBinaryUseKind(ObjectUse))
1612                 compilePeepHoleObjectEquality(node, branchNode);
1613             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1614                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1615             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1616                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1617             else if (!needsTypeCheck(node->child1(), SpecOther))
1618                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1619             else if (!needsTypeCheck(node->child2(), SpecOther))
1620                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1621             else {
1622                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1623                 return true;
1624             }
1625         } else {
1626             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1627             return true;
1628         }
1629
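             // The compare's operands were consumed by the fused branch; mark them used and advance
             // the compilation cursor to the branch node so the main loop does not emit it again.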
1630         use(node->child1());
1631         use(node->child2());
1632         m_indexInBlock = branchIndexInBlock;
1633         m_currentNode = branchNode;
1634         return true;
1635     }
1636     return false;
1637 }
1638
1639 void SpeculativeJIT::noticeOSRBirth(Node* node)
1640 {
1641     if (!node->hasVirtualRegister())
1642         return;
1643     
1644     VirtualRegister virtualRegister = node->virtualRegister();
1645     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1646     
1647     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1648 }
1649
1650 void SpeculativeJIT::compileMovHint(Node* node)
1651 {
1652     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1653     
1654     Node* child = node->child1().node();
1655     noticeOSRBirth(child);
1656     
1657     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1658 }
1659
1660 void SpeculativeJIT::bail(AbortReason reason)
1661 {
1662     if (verboseCompilationEnabled())
1663         dataLog("Bailing compilation.\n");
1664     m_compileOkay = true;
1665     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1666     clearGenerationInfo();
1667 }
1668
1669 void SpeculativeJIT::compileCurrentBlock()
1670 {
1671     ASSERT(m_compileOkay);
1672     
1673     if (!m_block)
1674         return;
1675     
1676     ASSERT(m_block->isReachable);
1677     
1678     m_jit.blockHeads()[m_block->index] = m_jit.label();
1679
1680     if (!m_block->intersectionOfCFAHasVisited) {
1681         // Don't generate code for basic blocks that are unreachable according to CFA.
1682         // But to be sure that nobody has generated a jump to this block, drop in a
1683         // breakpoint here.
1684         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1685         return;
1686     }
1687
1688     m_stream->appendAndLog(VariableEvent::reset());
1689     
1690     m_jit.jitAssertHasValidCallFrame();
1691     m_jit.jitAssertTagsInPlace();
1692     m_jit.jitAssertArgumentCountSane();
1693
1694     m_state.reset();
1695     m_state.beginBasicBlock(m_block);
1696     
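         // For each local that is live at the head of the block, record the machine slot and data
         // format it is flushed in, so the variable event stream can reconstruct the frame on OSR exit.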
1697     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1698         int operand = m_block->variablesAtHead.operandForIndex(i);
1699         Node* node = m_block->variablesAtHead[i];
1700         if (!node)
1701             continue; // No need to record dead SetLocals.
1702         
1703         VariableAccessData* variable = node->variableAccessData();
1704         DataFormat format;
1705         if (!node->refCount())
1706             continue; // No need to record dead SetLocals.
1707         format = dataFormatFor(variable->flushFormat());
1708         m_stream->appendAndLog(
1709             VariableEvent::setLocal(
1710                 VirtualRegister(operand),
1711                 variable->machineLocal(),
1712                 format));
1713     }
1714
1715     m_origin = NodeOrigin();
1716     
1717     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1718         m_currentNode = m_block->at(m_indexInBlock);
1719         
1720         // We may have hit a contradiction that the CFA was aware of but that the JIT
1721         // didn't cause directly.
1722         if (!m_state.isValid()) {
1723             bail(DFGBailedAtTopOfBlock);
1724             return;
1725         }
1726
1727         m_interpreter.startExecuting();
1728         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1729         m_jit.setForNode(m_currentNode);
1730         m_origin = m_currentNode->origin;
1731         if (validationEnabled())
1732             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1733         m_lastGeneratedNode = m_currentNode->op();
1734         
1735         ASSERT(m_currentNode->shouldGenerate());
1736         
1737         if (verboseCompilationEnabled()) {
1738             dataLogF(
1739                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1740                 (int)m_currentNode->index(),
1741                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1742             dataLog("\n");
1743         }
1744
1745         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1746             m_jit.jitReleaseAssertNoException();
1747
1748         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1749
1750         compile(m_currentNode);
1751         
1752         if (belongsInMinifiedGraph(m_currentNode->op()))
1753             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1754         
1755 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1756         m_jit.clearRegisterAllocationOffsets();
1757 #endif
1758         
1759         if (!m_compileOkay) {
1760             bail(DFGBailedAtEndOfNode);
1761             return;
1762         }
1763         
1764         // Make sure that the abstract state is rematerialized for the next node.
1765         m_interpreter.executeEffects(m_indexInBlock);
1766     }
1767     
1768     // Perform the most basic verification that children have been used correctly.
1769     if (!ASSERT_DISABLED) {
1770         for (auto& info : m_generationInfo)
1771             RELEASE_ASSERT(!info.alive());
1772     }
1773 }
1774
1775 // If we are making type predictions about our arguments then
1776 // we need to check that they are correct on function entry.
1777 void SpeculativeJIT::checkArgumentTypes()
1778 {
1779     ASSERT(!m_currentNode);
1780     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1781
1782     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1783         Node* node = m_jit.graph().m_arguments[i];
1784         if (!node) {
1785             // The argument is dead. We don't do any checks for such arguments.
1786             continue;
1787         }
1788         
1789         ASSERT(node->op() == SetArgument);
1790         ASSERT(node->shouldGenerate());
1791
1792         VariableAccessData* variableAccessData = node->variableAccessData();
1793         FlushFormat format = variableAccessData->flushFormat();
1794         
1795         if (format == FlushedJSValue)
1796             continue;
1797         
1798         VirtualRegister virtualRegister = variableAccessData->local();
1799
1800         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1801         
1802 #if USE(JSVALUE64)
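             // On 64-bit these checks rely on the JSValue encoding: int32s carry the tag-type-number
             // bits (so anything below the tag register is not an int32), booleans differ from
             // ValueFalse only in their low bit, and cells have none of the tag-mask bits set.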
1803         switch (format) {
1804         case FlushedInt32: {
1805             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1806             break;
1807         }
1808         case FlushedBoolean: {
1809             GPRTemporary temp(this);
1810             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1811             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1812             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1813             break;
1814         }
1815         case FlushedCell: {
1816             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1817             break;
1818         }
1819         default:
1820             RELEASE_ASSERT_NOT_REACHED();
1821             break;
1822         }
1823 #else
1824         switch (format) {
1825         case FlushedInt32: {
1826             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1827             break;
1828         }
1829         case FlushedBoolean: {
1830             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1831             break;
1832         }
1833         case FlushedCell: {
1834             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1835             break;
1836         }
1837         default:
1838             RELEASE_ASSERT_NOT_REACHED();
1839             break;
1840         }
1841 #endif
1842     }
1843
1844     m_origin = NodeOrigin();
1845 }
1846
1847 bool SpeculativeJIT::compile()
1848 {
1849     checkArgumentTypes();
1850     
1851     ASSERT(!m_currentNode);
1852     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1853         m_jit.setForBlockIndex(blockIndex);
1854         m_block = m_jit.graph().block(blockIndex);
1855         compileCurrentBlock();
1856     }
1857     linkBranches();
1858     return true;
1859 }
1860
1861 void SpeculativeJIT::createOSREntries()
1862 {
1863     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1864         BasicBlock* block = m_jit.graph().block(blockIndex);
1865         if (!block)
1866             continue;
1867         if (!block->isOSRTarget)
1868             continue;
1869         
1870         // Currently we don't have OSR entry trampolines. We could add them
1871         // here if need be.
1872         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1873     }
1874 }
1875
1876 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1877 {
1878     unsigned osrEntryIndex = 0;
1879     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1880         BasicBlock* block = m_jit.graph().block(blockIndex);
1881         if (!block)
1882             continue;
1883         if (!block->isOSRTarget)
1884             continue;
1885         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1886     }
1887     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1888     
1889     if (verboseCompilationEnabled()) {
1890         DumpContext dumpContext;
1891         dataLog("OSR Entries:\n");
1892         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1893             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1894         if (!dumpContext.isEmpty())
1895             dumpContext.dump(WTF::dataFile());
1896     }
1897 }
1898
1899 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1900 {
1901     Edge child3 = m_jit.graph().varArgChild(node, 2);
1902     Edge child4 = m_jit.graph().varArgChild(node, 3);
1903
1904     ArrayMode arrayMode = node->arrayMode();
1905     
1906     GPRReg baseReg = base.gpr();
1907     GPRReg propertyReg = property.gpr();
1908     
1909     SpeculateDoubleOperand value(this, child3);
1910
1911     FPRReg valueReg = value.fpr();
1912     
1913     DFG_TYPE_CHECK(
1914         JSValueRegs(), child3, SpecFullRealNumber,
1915         m_jit.branchDouble(
1916             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1917     
1918     if (!m_compileOkay)
1919         return;
1920     
1921     StorageOperand storage(this, child4);
1922     GPRReg storageReg = storage.gpr();
1923
1924     if (node->op() == PutByValAlias) {
1925         // Store the value to the array.
1926         GPRReg propertyReg = property.gpr();
1927         FPRReg valueReg = value.fpr();
1928         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1929         
1930         noResult(m_currentNode);
1931         return;
1932     }
1933     
1934     GPRTemporary temporary;
1935     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1936
1937     MacroAssembler::Jump slowCase;
1938     
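         // In-bounds array modes simply OSR exit on an out-of-bounds index. Otherwise the store may
         // grow the array: an index past the public length but within the vector length bumps the
         // public length, while an index beyond the vector length either takes the slow path (for
         // out-of-bounds modes) or OSR exits.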
1939     if (arrayMode.isInBounds()) {
1940         speculationCheck(
1941             OutOfBounds, JSValueRegs(), 0,
1942             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1943     } else {
1944         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1945         
1946         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1947         
1948         if (!arrayMode.isOutOfBounds())
1949             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1950         
1951         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1952         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1953         
1954         inBounds.link(&m_jit);
1955     }
1956     
1957     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1958
1959     base.use();
1960     property.use();
1961     value.use();
1962     storage.use();
1963     
1964     if (arrayMode.isOutOfBounds()) {
1965         addSlowPathGenerator(
1966             slowPathCall(
1967                 slowCase, this,
1968                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1969                 NoResult, baseReg, propertyReg, valueReg));
1970     }
1971
1972     noResult(m_currentNode, UseChildrenCalledExplicitly);
1973 }
1974
1975 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1976 {
1977     SpeculateCellOperand string(this, node->child1());
1978     SpeculateStrictInt32Operand index(this, node->child2());
1979     StorageOperand storage(this, node->child3());
1980
1981     GPRReg stringReg = string.gpr();
1982     GPRReg indexReg = index.gpr();
1983     GPRReg storageReg = storage.gpr();
1984     
1985     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1986
1987     // unsigned comparison so we can filter out negative indices and indices that are too large
1988     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1989
1990     GPRTemporary scratch(this);
1991     GPRReg scratchReg = scratch.gpr();
1992
1993     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1994
1995     // Load the character into scratchReg
1996     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1997
1998     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1999     JITCompiler::Jump cont8Bit = m_jit.jump();
2000
2001     is16Bit.link(&m_jit);
2002
2003     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2004
2005     cont8Bit.link(&m_jit);
2006
2007     int32Result(scratchReg, m_currentNode);
2008 }
2009
2010 void SpeculativeJIT::compileGetByValOnString(Node* node)
2011 {
2012     SpeculateCellOperand base(this, node->child1());
2013     SpeculateStrictInt32Operand property(this, node->child2());
2014     StorageOperand storage(this, node->child3());
2015     GPRReg baseReg = base.gpr();
2016     GPRReg propertyReg = property.gpr();
2017     GPRReg storageReg = storage.gpr();
2018
2019     GPRTemporary scratch(this);
2020     GPRReg scratchReg = scratch.gpr();
2021 #if USE(JSVALUE32_64)
2022     GPRTemporary resultTag;
2023     GPRReg resultTagReg = InvalidGPRReg;
2024     if (node->arrayMode().isOutOfBounds()) {
2025         GPRTemporary realResultTag(this);
2026         resultTag.adopt(realResultTag);
2027         resultTagReg = resultTag.gpr();
2028     }
2029 #endif
2030
2031     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2032
2033     // unsigned comparison so we can filter out negative indices and indices that are too large
2034     JITCompiler::Jump outOfBounds = m_jit.branch32(
2035         MacroAssembler::AboveOrEqual, propertyReg,
2036         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2037     if (node->arrayMode().isInBounds())
2038         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2039
2040     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2041
2042     // Load the character into scratchReg
2043     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2044
2045     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2046     JITCompiler::Jump cont8Bit = m_jit.jump();
2047
2048     is16Bit.link(&m_jit);
2049
2050     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2051
2052     JITCompiler::Jump bigCharacter =
2053         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2054
2055     // 8 bit string values don't need the isASCII check.
2056     cont8Bit.link(&m_jit);
2057
2058     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2059     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2060     m_jit.loadPtr(scratchReg, scratchReg);
2061
2062     addSlowPathGenerator(
2063         slowPathCall(
2064             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2065
2066     if (node->arrayMode().isOutOfBounds()) {
2067 #if USE(JSVALUE32_64)
2068         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2069 #endif
2070
2071         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2072         bool prototypeChainIsSane = false;
2073         if (globalObject->stringPrototypeChainIsSane()) {
2074             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2075             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2076             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2077             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2078             // indexed properties either.
2079             // https://bugs.webkit.org/show_bug.cgi?id=144668
2080             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2081             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2082             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2083         }
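             // When the String and Object prototypes have no indexed properties, a read past the end
             // of the string can only produce undefined, so the cheaper sane-chain slow path is used;
             // otherwise fall back to the generic operationGetByValStringInt call.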
2084         if (prototypeChainIsSane) {
2085             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2086             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2087             
2088 #if USE(JSVALUE64)
2089             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2090                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2091 #else
2092             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2093                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2094                 baseReg, propertyReg));
2095 #endif
2096         } else {
2097 #if USE(JSVALUE64)
2098             addSlowPathGenerator(
2099                 slowPathCall(
2100                     outOfBounds, this, operationGetByValStringInt,
2101                     scratchReg, baseReg, propertyReg));
2102 #else
2103             addSlowPathGenerator(
2104                 slowPathCall(
2105                     outOfBounds, this, operationGetByValStringInt,
2106                     JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2107 #endif
2108         }
2109         
2110 #if USE(JSVALUE64)
2111         jsValueResult(scratchReg, m_currentNode);
2112 #else
2113         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2114 #endif
2115     } else
2116         cellResult(scratchReg, m_currentNode);
2117 }
2118
2119 void SpeculativeJIT::compileFromCharCode(Node* node)
2120 {
2121     Edge& child = node->child1();
2122     if (child.useKind() == UntypedUse) {
2123         JSValueOperand opr(this, child);
2124         JSValueRegs oprRegs = opr.jsValueRegs();
2125 #if USE(JSVALUE64)
2126         GPRTemporary result(this);
2127         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2128 #else
2129         GPRTemporary resultTag(this);
2130         GPRTemporary resultPayload(this);
2131         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2132 #endif
2133         flushRegisters();
2134         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2135         m_jit.exceptionCheck();
2136         
2137         jsValueResult(resultRegs, node);
2138         return;
2139     }
2140
2141     SpeculateStrictInt32Operand property(this, child);
2142     GPRReg propertyReg = property.gpr();
2143     GPRTemporary smallStrings(this);
2144     GPRTemporary scratch(this);
2145     GPRReg scratchReg = scratch.gpr();
2146     GPRReg smallStringsReg = smallStrings.gpr();
2147
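         // Fast path: small character codes index the VM's table of single-character strings. Larger
         // codes, or a table slot that has not been materialized yet, take the slow path call.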
2148     JITCompiler::JumpList slowCases;
2149     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2150     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2151     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2152
2153     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2154     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2155     cellResult(scratchReg, m_currentNode);
2156 }
2157
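     // Classify how the ToInt32 operand is currently represented so compileValueToInt32() can pick
     // the cheapest conversion: already an int32, a boxed JSValue that still needs converting, or a
     // format (cell or boolean) on which we simply terminate speculative execution.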
2158 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2159 {
2160     VirtualRegister virtualRegister = node->virtualRegister();
2161     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2162
2163     switch (info.registerFormat()) {
2164     case DataFormatStorage:
2165         RELEASE_ASSERT_NOT_REACHED();
2166
2167     case DataFormatBoolean:
2168     case DataFormatCell:
2169         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2170         return GeneratedOperandTypeUnknown;
2171
2172     case DataFormatNone:
2173     case DataFormatJSCell:
2174     case DataFormatJS:
2175     case DataFormatJSBoolean:
2176     case DataFormatJSDouble:
2177         return GeneratedOperandJSValue;
2178
2179     case DataFormatJSInt32:
2180     case DataFormatInt32:
2181         return GeneratedOperandInteger;
2182
2183     default:
2184         RELEASE_ASSERT_NOT_REACHED();
2185         return GeneratedOperandTypeUnknown;
2186     }
2187 }
2188
2189 void SpeculativeJIT::compileValueToInt32(Node* node)
2190 {
2191     switch (node->child1().useKind()) {
2192 #if USE(JSVALUE64)
2193     case Int52RepUse: {
2194         SpeculateStrictInt52Operand op1(this, node->child1());
2195         GPRTemporary result(this, Reuse, op1);
2196         GPRReg op1GPR = op1.gpr();
2197         GPRReg resultGPR = result.gpr();
2198         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2199         int32Result(resultGPR, node, DataFormatInt32);
2200         return;
2201     }
2202 #endif // USE(JSVALUE64)
2203         
2204     case DoubleRepUse: {
2205         GPRTemporary result(this);
2206         SpeculateDoubleOperand op1(this, node->child1());
2207         FPRReg fpr = op1.fpr();
2208         GPRReg gpr = result.gpr();
2209         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2210         
2211         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2212         
2213         int32Result(gpr, node);
2214         return;
2215     }
2216     
2217     case NumberUse:
2218     case NotCellUse: {
2219         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2220         case GeneratedOperandInteger: {
2221             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2222             GPRTemporary result(this, Reuse, op1);
2223             m_jit.move(op1.gpr(), result.gpr());
2224             int32Result(result.gpr(), node, op1.format());
2225             return;
2226         }
2227         case GeneratedOperandJSValue: {
2228             GPRTemporary result(this);
2229 #if USE(JSVALUE64)
2230             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2231
2232             GPRReg gpr = op1.gpr();
2233             GPRReg resultGpr = result.gpr();
2234             FPRTemporary tempFpr(this);
2235             FPRReg fpr = tempFpr.fpr();
2236
2237             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2238             JITCompiler::JumpList converted;
2239
2240             if (node->child1().useKind() == NumberUse) {
2241                 DFG_TYPE_CHECK(
2242                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2243                     m_jit.branchTest64(
2244                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2245             } else {
2246                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2247                 
2248                 DFG_TYPE_CHECK(
2249                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2250                 
2251                 // It's not a cell: so true turns into 1 and all else turns into 0.
2252                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2253                 converted.append(m_jit.jump());
2254                 
2255                 isNumber.link(&m_jit);
2256             }
2257
2258             // First, if we get here, we have a double encoded as a JSValue
2259             unboxDouble(gpr, resultGpr, fpr);
2260
2261             silentSpillAllRegisters(resultGpr);
2262             callOperation(operationToInt32, resultGpr, fpr);
2263             silentFillAllRegisters(resultGpr);
2264
2265             converted.append(m_jit.jump());
2266
2267             isInteger.link(&m_jit);
2268             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2269
2270             converted.link(&m_jit);
2271 #else
2272             Node* childNode = node->child1().node();
2273             VirtualRegister virtualRegister = childNode->virtualRegister();
2274             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2275
2276             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2277
2278             GPRReg payloadGPR = op1.payloadGPR();
2279             GPRReg resultGpr = result.gpr();
2280         
2281             JITCompiler::JumpList converted;
2282
2283             if (info.registerFormat() == DataFormatJSInt32)
2284                 m_jit.move(payloadGPR, resultGpr);
2285             else {
2286                 GPRReg tagGPR = op1.tagGPR();
2287                 FPRTemporary tempFpr(this);
2288                 FPRReg fpr = tempFpr.fpr();
2289                 FPRTemporary scratch(this);
2290
2291                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2292
2293                 if (node->child1().useKind() == NumberUse) {
2294                     DFG_TYPE_CHECK(
2295                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2296                         m_jit.branch32(
2297                             MacroAssembler::AboveOrEqual, tagGPR,
2298                             TrustedImm32(JSValue::LowestTag)));
2299                 } else {
2300                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2301                     
2302                     DFG_TYPE_CHECK(
2303                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2304                         m_jit.branchIfCell(op1.jsValueRegs()));
2305                     
2306                     // It's not a cell: so true turns into 1 and all else turns into 0.
2307                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2308                     m_jit.move(TrustedImm32(0), resultGpr);
2309                     converted.append(m_jit.jump());
2310                     
2311                     isBoolean.link(&m_jit);
2312                     m_jit.move(payloadGPR, resultGpr);
2313                     converted.append(m_jit.jump());
2314                     
2315                     isNumber.link(&m_jit);
2316                 }
2317
2318                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2319
2320                 silentSpillAllRegisters(resultGpr);
2321                 callOperation(operationToInt32, resultGpr, fpr);
2322                 silentFillAllRegisters(resultGpr);
2323
2324                 converted.append(m_jit.jump());
2325
2326                 isInteger.link(&m_jit);
2327                 m_jit.move(payloadGPR, resultGpr);
2328
2329                 converted.link(&m_jit);
2330             }
2331 #endif
2332             int32Result(resultGpr, node);
2333             return;
2334         }
2335         case GeneratedOperandTypeUnknown:
2336             RELEASE_ASSERT(!m_compileOkay);
2337             return;
2338         }
2339         RELEASE_ASSERT_NOT_REACHED();
2340         return;
2341     }
2342     
2343     default:
2344         ASSERT(!m_compileOkay);
2345         return;
2346     }
2347 }
2348
2349 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2350 {
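         // The operand is an int32 register holding a uint32 value. If the profile says it may not
         // fit in int32, re-box it as Int52 when available or as a double, adding 2^32 when the sign
         // bit is set; otherwise just speculate that it is non-negative.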
2351     if (doesOverflow(node->arithMode())) {
2352         if (enableInt52()) {
2353             SpeculateInt32Operand op1(this, node->child1());
2354             GPRTemporary result(this, Reuse, op1);
2355             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2356             strictInt52Result(result.gpr(), node);
2357             return;
2358         }
2359         SpeculateInt32Operand op1(this, node->child1());
2360         FPRTemporary result(this);
2361             
2362         GPRReg inputGPR = op1.gpr();
2363         FPRReg outputFPR = result.fpr();
2364             
2365         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2366             
2367         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2368         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2369         positive.link(&m_jit);
2370             
2371         doubleResult(outputFPR, node);
2372         return;
2373     }
2374     
2375     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2376
2377     SpeculateInt32Operand op1(this, node->child1());
2378     GPRTemporary result(this);
2379
2380     m_jit.move(op1.gpr(), result.gpr());
2381
2382     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2383
2384     int32Result(result.gpr(), node, op1.format());
2385 }
2386
2387 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2388 {
2389     SpeculateDoubleOperand op1(this, node->child1());
2390     FPRTemporary scratch(this);
2391     GPRTemporary result(this);
2392     
2393     FPRReg valueFPR = op1.fpr();
2394     FPRReg scratchFPR = scratch.fpr();
2395     GPRReg resultGPR = result.gpr();
2396
2397     JITCompiler::JumpList failureCases;
2398     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2399     m_jit.branchConvertDoubleToInt32(
2400         valueFPR, resultGPR, failureCases, scratchFPR,
2401         shouldCheckNegativeZero(node->arithMode()));
2402     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2403
2404     int32Result(resultGPR, node);
2405 }
2406
2407 void SpeculativeJIT::compileDoubleRep(Node* node)
2408 {
2409     switch (node->child1().useKind()) {
2410     case RealNumberUse: {
2411         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2412         FPRTemporary result(this);
2413         
2414         JSValueRegs op1Regs = op1.jsValueRegs();
2415         FPRReg resultFPR = result.fpr();
2416         
2417 #if USE(JSVALUE64)
2418         GPRTemporary temp(this);
2419         GPRReg tempGPR = temp.gpr();
2420         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2421 #else
2422         FPRTemporary temp(this);
2423         FPRReg tempFPR = temp.fpr();
2424         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2425 #endif
2426         
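             // If the unboxed value compares equal to itself it really was a boxed double and we are
             // done. RealNumberUse excludes NaN, so a NaN result means the input was a boxed int32
             // (anything else fails the type check below), which we convert instead.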
2427         JITCompiler::Jump done = m_jit.branchDouble(
2428             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2429         
2430         DFG_TYPE_CHECK(
2431             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2432         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2433         
2434         done.link(&m_jit);
2435         
2436         doubleResult(resultFPR, node);
2437         return;
2438     }
2439     
2440     case NotCellUse:
2441     case NumberUse: {
2442         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2443
2444         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2445         if (isInt32Speculation(possibleTypes)) {
2446             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2447             FPRTemporary result(this);
2448             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2449             doubleResult(result.fpr(), node);
2450             return;
2451         }
2452
2453         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2454         FPRTemporary result(this);
2455
2456 #if USE(JSVALUE64)
2457         GPRTemporary temp(this);
2458
2459         GPRReg op1GPR = op1.gpr();
2460         GPRReg tempGPR = temp.gpr();
2461         FPRReg resultFPR = result.fpr();
2462         JITCompiler::JumpList done;
2463
2464         JITCompiler::Jump isInteger = m_jit.branch64(
2465             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2466
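             // For NotCellUse we must also convert the non-numeric primitives: undefined becomes NaN,
             // null and false become 0, and true becomes 1. Any cell fails the speculation.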
2467         if (node->child1().useKind() == NotCellUse) {
2468             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2469             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2470
2471             static const double zero = 0;
2472             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2473
2474             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2475             done.append(isNull);
2476
2477             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2478                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2479
2480             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2481             static const double one = 1;
2482             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2483             done.append(m_jit.jump());
2484             done.append(isFalse);
2485
2486             isUndefined.link(&m_jit);
2487             static const double NaN = PNaN;
2488             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2489             done.append(m_jit.jump());
2490
2491             isNumber.link(&m_jit);
2492         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2493             typeCheck(
2494                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2495                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2496         }
2497
2498         unboxDouble(op1GPR, tempGPR, resultFPR);
2499         done.append(m_jit.jump());
2500     
2501         isInteger.link(&m_jit);
2502         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2503         done.link(&m_jit);
2504 #else // USE(JSVALUE64) -> this is the 32_64 case
2505         FPRTemporary temp(this);
2506     
2507         GPRReg op1TagGPR = op1.tagGPR();
2508         GPRReg op1PayloadGPR = op1.payloadGPR();
2509         FPRReg tempFPR = temp.fpr();
2510         FPRReg resultFPR = result.fpr();
2511         JITCompiler::JumpList done;
2512     
2513         JITCompiler::Jump isInteger = m_jit.branch32(
2514             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2515
2516         if (node->child1().useKind() == NotCellUse) {
2517             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2518             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2519
2520             static const double zero = 0;
2521             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2522
2523             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2524             done.append(isNull);
2525
2526             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2527
2528             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2529             static const double one = 1;
2530             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2531             done.append(m_jit.jump());
2532             done.append(isFalse);
2533
2534             isUndefined.link(&m_jit);
2535             static const double NaN = PNaN;
2536             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2537             done.append(m_jit.jump());
2538
2539             isNumber.link(&m_jit);
2540         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2541             typeCheck(
2542                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2543                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2544         }
2545
2546         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2547         done.append(m_jit.jump());
2548     
2549         isInteger.link(&m_jit);
2550         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2551         done.link(&m_jit);
2552 #endif // USE(JSVALUE64)
2553     
2554         doubleResult(resultFPR, node);
2555         return;
2556     }
2557         
2558 #if USE(JSVALUE64)
2559     case Int52RepUse: {
2560         SpeculateStrictInt52Operand value(this, node->child1());
2561         FPRTemporary result(this);
2562         
2563         GPRReg valueGPR = value.gpr();
2564         FPRReg resultFPR = result.fpr();
2565
2566         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2567         
2568         doubleResult(resultFPR, node);
2569         return;
2570     }
2571 #endif // USE(JSVALUE64)
2572         
2573     default:
2574         RELEASE_ASSERT_NOT_REACHED();
2575         return;
2576     }
2577 }
2578
2579 void SpeculativeJIT::compileValueRep(Node* node)
2580 {
2581     switch (node->child1().useKind()) {
2582     case DoubleRepUse: {
2583         SpeculateDoubleOperand value(this, node->child1());
2584         JSValueRegsTemporary result(this);
2585         
2586         FPRReg valueFPR = value.fpr();
2587         JSValueRegs resultRegs = result.regs();
2588         
2589         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2590         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2591         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2592         // local was purified.
2593         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2594             m_jit.purifyNaN(valueFPR);
2595
2596         boxDouble(valueFPR, resultRegs);
2597         
2598         jsValueResult(resultRegs, node);
2599         return;
2600     }
2601         
2602 #if USE(JSVALUE64)
2603     case Int52RepUse: {
2604         SpeculateStrictInt52Operand value(this, node->child1());
2605         GPRTemporary result(this);
2606         
2607         GPRReg valueGPR = value.gpr();
2608         GPRReg resultGPR = result.gpr();
2609         
2610         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2611         
2612         jsValueResult(resultGPR, node);
2613         return;
2614     }
2615 #endif // USE(JSVALUE64)
2616         
2617     default:
2618         RELEASE_ASSERT_NOT_REACHED();
2619         return;
2620     }
2621 }
2622
2623 static double clampDoubleToByte(double d)
2624 {
2625     d += 0.5;
2626     if (!(d > 0))
2627         d = 0;
2628     else if (d > 255)
2629         d = 255;
2630     return d;
2631 }
2632
2633 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2634 {
2635     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2636     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2637     jit.xorPtr(result, result);
2638     MacroAssembler::Jump clamped = jit.jump();
2639     tooBig.link(&jit);
2640     jit.move(JITCompiler::TrustedImm32(255), result);
2641     clamped.link(&jit);
2642     inBounds.link(&jit);
2643 }
2644
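     // Emit code that clamps a double to the 0..255 range used by clamped byte arrays: NaN and values
     // <= 0 become 0, values above 255 become 255, and everything else is rounded by adding 0.5 and
     // truncating toward zero.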
2645 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2646 {
2647     // Unordered compare so we pick up NaN
2648     static const double zero = 0;
2649     static const double byteMax = 255;
2650     static const double half = 0.5;
2651     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2652     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2653     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2654     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2655     
2656     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2657     // FIXME: This should probably just use a floating point round!
2658     // https://bugs.webkit.org/show_bug.cgi?id=72054
2659     jit.addDouble(source, scratch);
2660     jit.truncateDoubleToInt32(scratch, result);   
2661     MacroAssembler::Jump truncatedInt = jit.jump();
2662     
2663     tooSmall.link(&jit);
2664     jit.xorPtr(result, result);
2665     MacroAssembler::Jump zeroed = jit.jump();
2666     
2667     tooBig.link(&jit);
2668     jit.move(JITCompiler::TrustedImm32(255), result);
2669     
2670     truncatedInt.link(&jit);
2671     zeroed.link(&jit);
2672
2673 }
2674
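     // Produce the out-of-bounds check for a typed array access, or no jump at all when the check is
     // provably unnecessary (PutByValAlias, or a constant index into a view of known length). When the
     // view folds to a constant we compare against an immediate length instead of reloading it.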
2675 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2676 {
2677     if (node->op() == PutByValAlias)
2678         return JITCompiler::Jump();
2679     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2680         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2681     if (view) {
2682         uint32_t length = view->length();
2683         Node* indexNode = m_jit.graph().child(node, 1).node();
2684         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2685             return JITCompiler::Jump();
2686         return m_jit.branch32(
2687             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2688     }
2689     return m_jit.branch32(
2690         MacroAssembler::AboveOrEqual, indexGPR,
2691         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2692 }
2693
2694 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2695 {
2696     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2697     if (!jump.isSet())
2698         return;
2699     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2700 }
2701
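     // When an out-of-bounds access is possible, distinguish a merely too-large index from a detached
     // buffer: a wasteful typed array whose vector pointer is null has been neutered, and we OSR exit
     // in that case instead of treating it as an ordinary out-of-bounds miss.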
2702 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2703 {
2704     JITCompiler::Jump done;
2705     if (outOfBounds.isSet()) {
2706         done = m_jit.jump();
2707         if (node->arrayMode().isInBounds())
2708             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2709         else {
2710             outOfBounds.link(&m_jit);
2711
2712             JITCompiler::Jump notWasteful = m_jit.branch32(
2713                 MacroAssembler::NotEqual,
2714                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2715                 TrustedImm32(WastefulTypedArray));
2716
2717             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2718                 MacroAssembler::Zero,
2719                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2720             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2721             notWasteful.link(&m_jit);
2722         }
2723     }
2724     return done;
2725 }
2726
2727 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2728 {
2729     ASSERT(isInt(type));
2730     
2731     SpeculateCellOperand base(this, node->child1());
2732     SpeculateStrictInt32Operand property(this, node->child2());
2733     StorageOperand storage(this, node->child3());
2734
2735     GPRReg baseReg = base.gpr();
2736     GPRReg propertyReg = property.gpr();
2737     GPRReg storageReg = storage.gpr();
2738
2739     GPRTemporary result(this);
2740     GPRReg resultReg = result.gpr();
2741
2742     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2743
2744     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2745     switch (elementSize(type)) {
2746     case 1:
2747         if (isSigned(type))
2748             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2749         else
2750             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2751         break;
2752     case 2:
2753         if (isSigned(type))
2754             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2755         else
2756             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2757         break;
2758     case 4:
2759         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2760         break;
2761     default:
2762         CRASH();
2763     }
2764     if (elementSize(type) < 4 || isSigned(type)) {
2765         int32Result(resultReg, node);
2766         return;
2767     }
2768     
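         // A Uint32 load may not fit in an int32. Prefer to speculate that it does (OSR exiting on a
         // negative result), then use Int52 on 64-bit, and finally fall back to a double, adding 2^32
         // when the raw bit pattern is negative.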
2769     ASSERT(elementSize(type) == 4 && !isSigned(type));
2770     if (node->shouldSpeculateInt32()) {
2771         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2772         int32Result(resultReg, node);
2773         return;
2774     }
2775     
2776 #if USE(JSVALUE64)
2777     if (node->shouldSpeculateAnyInt()) {
2778         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2779         strictInt52Result(resultReg, node);
2780         return;
2781     }
2782 #endif
2783     
2784     FPRTemporary fresult(this);
2785     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2786     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2787     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2788     positive.link(&m_jit);
2789     doubleResult(fresult.fpr(), node);
2790 }
2791
2792 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2793 {
2794     ASSERT(isInt(type));
2795     
2796     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2797     GPRReg storageReg = storage.gpr();
2798     
2799     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2800     
2801     GPRTemporary value;
2802 #if USE(JSVALUE32_64)
2803     GPRTemporary propertyTag;
2804     GPRTemporary valueTag;
2805 #endif
2806
2807     GPRReg valueGPR = InvalidGPRReg;
2808 #if USE(JSVALUE32_64)
2809     GPRReg propertyTagGPR = InvalidGPRReg;
2810     GPRReg valueTagGPR = InvalidGPRReg;
2811 #endif
2812
2813     JITCompiler::JumpList slowPathCases;
2814
2815     if (valueUse->isConstant()) {
2816         JSValue jsValue = valueUse->asJSValue();
2817         if (!jsValue.isNumber()) {
2818             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2819             noResult(node);
2820             return;
2821         }
2822         double d = jsValue.asNumber();
2823         if (isClamped(type)) {
2824             ASSERT(elementSize(type) == 1);
2825             d = clampDoubleToByte(d);
2826         }
2827         GPRTemporary scratch(this);
2828         GPRReg scratchReg = scratch.gpr();
2829         m_jit.move(Imm32(toInt32(d)), scratchReg);
2830         value.adopt(scratch);
2831         valueGPR = scratchReg;
2832     } else {
2833         switch (valueUse.useKind()) {
2834         case Int32Use: {
2835             SpeculateInt32Operand valueOp(this, valueUse);
2836             GPRTemporary scratch(this);
2837             GPRReg scratchReg = scratch.gpr();
2838             m_jit.move(valueOp.gpr(), scratchReg);
2839             if (isClamped(type)) {
2840                 ASSERT(elementSize(type) == 1);
2841                 compileClampIntegerToByte(m_jit, scratchReg);
2842             }
2843             value.adopt(scratch);
2844             valueGPR = scratchReg;
2845             break;
2846         }
2847             
2848 #if USE(JSVALUE64)
2849         case Int52RepUse: {
2850             SpeculateStrictInt52Operand valueOp(this, valueUse);
2851             GPRTemporary scratch(this);
2852             GPRReg scratchReg = scratch.gpr();
2853             m_jit.move(valueOp.gpr(), scratchReg);
2854             if (isClamped(type)) {
2855                 ASSERT(elementSize(type) == 1);
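                // Clamp the Int52 value to a Uint8Clamped byte. In effect (sketch):
                //     value = value < 0 ? 0 : (value > 0xff ? 0xff : value);
                // The unsigned BelowOrEqual catches [0, 255]; the signed GreaterThan then
                // separates too-big values from negative ones.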
2856                 MacroAssembler::Jump inBounds = m_jit.branch64(
2857                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2858                 MacroAssembler::Jump tooBig = m_jit.branch64(
2859                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2860                 m_jit.move(TrustedImm32(0), scratchReg);
2861                 MacroAssembler::Jump clamped = m_jit.jump();
2862                 tooBig.link(&m_jit);
2863                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2864                 clamped.link(&m_jit);
2865                 inBounds.link(&m_jit);
2866             }
2867             value.adopt(scratch);
2868             valueGPR = scratchReg;
2869             break;
2870         }
2871 #endif // USE(JSVALUE64)
2872             
2873         case DoubleRepUse: {
2874             if (isClamped(type)) {
2875                 ASSERT(elementSize(type) == 1);
2876                 SpeculateDoubleOperand valueOp(this, valueUse);
2877                 GPRTemporary result(this);
2878                 FPRTemporary floatScratch(this);
2879                 FPRReg fpr = valueOp.fpr();
2880                 GPRReg gpr = result.gpr();
2881                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2882                 value.adopt(result);
2883                 valueGPR = gpr;
2884             } else {
2885 #if USE(JSVALUE32_64)
2886                 GPRTemporary realPropertyTag(this);
2887                 propertyTag.adopt(realPropertyTag);
2888                 propertyTagGPR = propertyTag.gpr();
2889
2890                 GPRTemporary realValueTag(this);
2891                 valueTag.adopt(realValueTag);
2892                 valueTagGPR = valueTag.gpr();
2893 #endif
2894                 SpeculateDoubleOperand valueOp(this, valueUse);
2895                 GPRTemporary result(this);
2896                 FPRReg fpr = valueOp.fpr();
2897                 GPRReg gpr = result.gpr();
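                // Non-clamped double store: NaN stores as 0, and a double that truncates cleanly
                // to int32 is stored directly. Anything else boxes the double, re-boxes the
                // property index, and jumps to the generic put slow path at the bottom of this
                // function.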
2898                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2899                 m_jit.xorPtr(gpr, gpr);
2900                 MacroAssembler::JumpList fixed(m_jit.jump());
2901                 notNaN.link(&m_jit);
2902
2903                 fixed.append(m_jit.branchTruncateDoubleToInt32(
2904                     fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
2905
2906 #if USE(JSVALUE64)
2907                 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
2908                 boxDouble(fpr, gpr);
2909 #else
2910                 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
2911                 boxDouble(fpr, valueTagGPR, gpr);
2912 #endif
2913                 slowPathCases.append(m_jit.jump());
2914
2915                 fixed.link(&m_jit);
2916                 value.adopt(result);
2917                 valueGPR = gpr;
2918             }
2919             break;
2920         }
2921             
2922         default:
2923             RELEASE_ASSERT_NOT_REACHED();
2924             break;
2925         }
2926     }
2927     
2928     ASSERT_UNUSED(valueGPR, valueGPR != property);
2929     ASSERT(valueGPR != base);
2930     ASSERT(valueGPR != storageReg);
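    // Bounds checking is deferred: the store below only runs for in-bounds indices, and
    // jumpForTypedArrayIsNeuteredIfOutOfBounds afterwards either OSR exits (for in-bounds array
    // modes) or treats the out-of-bounds store as a no-op, modulo the neutered-buffer check.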
2931     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2932
2933     switch (elementSize(type)) {
2934     case 1:
2935         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2936         break;
2937     case 2:
2938         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2939         break;
2940     case 4:
2941         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2942         break;
2943     default:
2944         CRASH();
2945     }
2946
2947     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2948     if (done.isSet())
2949         done.link(&m_jit);
2950
2951     if (!slowPathCases.empty()) {
2952 #if USE(JSVALUE64)
2953         if (node->op() == PutByValDirect) {
2954             addSlowPathGenerator(slowPathCall(
2955                 slowPathCases, this,
2956                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
2957                 NoResult, base, property, valueGPR));
2958         } else {
2959             addSlowPathGenerator(slowPathCall(
2960                 slowPathCases, this,
2961                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
2962                 NoResult, base, property, valueGPR));
2963         }
2964 #else // not USE(JSVALUE64)
2965         if (node->op() == PutByValDirect) {
2966             addSlowPathGenerator(slowPathCall(
2967                 slowPathCases, this,
2968                 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
2969                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
2970         } else {
2971             addSlowPathGenerator(slowPathCall(
2972                 slowPathCases, this,
2973                 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
2974                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
2975         }
2976 #endif
2977     }
2978     noResult(node);
2979 }
2980
2981 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2982 {
2983     ASSERT(isFloat(type));
2984     
2985     SpeculateCellOperand base(this, node->child1());
2986     SpeculateStrictInt32Operand property(this, node->child2());
2987     StorageOperand storage(this, node->child3());
2988
2989     GPRReg baseReg = base.gpr();
2990     GPRReg propertyReg = property.gpr();
2991     GPRReg storageReg = storage.gpr();
2992
2993     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2994
2995     FPRTemporary result(this);
2996     FPRReg resultReg = result.fpr();
2997     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2998     switch (elementSize(type)) {
2999     case 4:
3000         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3001         m_jit.convertFloatToDouble(resultReg, resultReg);
3002         break;
3003     case 8: {
3004         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3005         break;
3006     }
3007     default:
3008         RELEASE_ASSERT_NOT_REACHED();
3009     }
3010     
3011     doubleResult(resultReg, node);
3012 }
3013
3014 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3015 {
3016     ASSERT(isFloat(type));
3017     
3018     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3019     GPRReg storageReg = storage.gpr();
3020     
3021     Edge baseUse = m_jit.graph().varArgChild(node, 0);
3022     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3023
3024     SpeculateDoubleOperand valueOp(this, valueUse);
3025     FPRTemporary scratch(this);
3026     FPRReg valueFPR = valueOp.fpr();
3027     FPRReg scratchFPR = scratch.fpr();
3028
3029     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3030     
3031     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3032     
3033     switch (elementSize(type)) {
3034     case 4: {
3035         m_jit.moveDouble(valueFPR, scratchFPR);
3036         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3037         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3038         break;
3039     }
3040     case 8:
3041         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3042         break;
3043     default:
3044         RELEASE_ASSERT_NOT_REACHED();
3045     }
3046
3047     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3048     if (done.isSet())
3049         done.link(&m_jit);
3050     noResult(node);
3051 }
3052
3053 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
3054 {
3055     // Check that prototype is an object.
3056     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3057     
3058     // Initialize scratchReg with the value being checked.
3059     m_jit.move(valueReg, scratchReg);
3060     
3061     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3062     MacroAssembler::Label loop(&m_jit);
3063     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3064         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3065     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
3066     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
3067     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3068 #if USE(JSVALUE64)
3069     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3070 #else
3071     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3072 #endif
3073     
3074     // No match - result is false.
3075 #if USE(JSVALUE64)
3076     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
3077 #else
3078     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
3079 #endif
3080     MacroAssembler::JumpList doneJumps; 
3081     doneJumps.append(m_jit.jump());
3082
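    // Slow path for proxies: the inline prototype walk bails out to operationDefaultHasInstance.
    // On 64-bit the operation's 0/1 result is turned into a boxed boolean by OR-ing in ValueFalse
    // (ValueTrue == ValueFalse | 1).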
3083     performDefaultHasInstance.link(&m_jit);
3084     silentSpillAllRegisters(scratchReg);
3085     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
3086     silentFillAllRegisters(scratchReg);
3087     m_jit.exceptionCheck();
3088 #if USE(JSVALUE64)
3089     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
3090 #endif
3091     doneJumps.append(m_jit.jump());
3092     
3093     isInstance.link(&m_jit);
3094 #if USE(JSVALUE64)
3095     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
3096 #else
3097     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
3098 #endif
3099     
3100     doneJumps.link(&m_jit);
3101 }
3102
3103 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
3104 {
3105     SpeculateCellOperand base(this, node->child1());
3106
3107     GPRReg baseGPR = base.gpr();
3108
3109     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
3110
3111     noResult(node);
3112 }
3113
3114 void SpeculativeJIT::compileInstanceOf(Node* node)
3115 {
3116     if (node->child1().useKind() == UntypedUse) {
3117         // It might not be a cell. Speculate less aggressively.
3118         // Or: it might only be used once (i.e. by us), so we get zero benefit
3119         // from speculating any more aggressively than we absolutely need to.
3120         
3121         JSValueOperand value(this, node->child1());
3122         SpeculateCellOperand prototype(this, node->child2());
3123         GPRTemporary scratch(this);
3124         GPRTemporary scratch2(this);
3125         
3126         GPRReg prototypeReg = prototype.gpr();
3127         GPRReg scratchReg = scratch.gpr();
3128         GPRReg scratch2Reg = scratch2.gpr();
3129         
3130         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
3131         GPRReg valueReg = value.jsValueRegs().payloadGPR();
3132         moveFalseTo(scratchReg);
3133
3134         MacroAssembler::Jump done = m_jit.jump();
3135         
3136         isCell.link(&m_jit);
3137         
3138         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3139         
3140         done.link(&m_jit);
3141
3142         blessedBooleanResult(scratchReg, node);
3143         return;
3144     }
3145     
3146     SpeculateCellOperand value(this, node->child1());
3147     SpeculateCellOperand prototype(this, node->child2());
3148     
3149     GPRTemporary scratch(this);
3150     GPRTemporary scratch2(this);
3151     
3152     GPRReg valueReg = value.gpr();
3153     GPRReg prototypeReg = prototype.gpr();
3154     GPRReg scratchReg = scratch.gpr();
3155     GPRReg scratch2Reg = scratch2.gpr();
3156     
3157     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3158
3159     blessedBooleanResult(scratchReg, node);
3160 }
3161
3162 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3163 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3164 {
3165     Edge& leftChild = node->child1();
3166     Edge& rightChild = node->child2();
3167
3168     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3169         JSValueOperand left(this, leftChild);
3170         JSValueOperand right(this, rightChild);
3171         JSValueRegs leftRegs = left.jsValueRegs();
3172         JSValueRegs rightRegs = right.jsValueRegs();
3173 #if USE(JSVALUE64)
3174         GPRTemporary result(this);
3175         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3176 #else
3177         GPRTemporary resultTag(this);
3178         GPRTemporary resultPayload(this);
3179         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3180 #endif
3181         flushRegisters();
3182         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3183         m_jit.exceptionCheck();
3184
3185         jsValueResult(resultRegs, node);
3186         return;
3187     }
3188
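    // Otherwise, emit the snippet generator's inline fast path and fall back to the C++
    // operation only on its slow-path jumps. A constant operand is rematerialized into the
    // result registers just before the slow call.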
3189     std::optional<JSValueOperand> left;
3190     std::optional<JSValueOperand> right;
3191
3192     JSValueRegs leftRegs;
3193     JSValueRegs rightRegs;
3194
3195 #if USE(JSVALUE64)
3196     GPRTemporary result(this);
3197     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3198     GPRTemporary scratch(this);
3199     GPRReg scratchGPR = scratch.gpr();
3200 #else
3201     GPRTemporary resultTag(this);
3202     GPRTemporary resultPayload(this);
3203     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3204     GPRReg scratchGPR = resultTag.gpr();
3205 #endif
3206
3207     SnippetOperand leftOperand;
3208     SnippetOperand rightOperand;
3209
3210     // The snippet generator does not support both operands being constant. If the left
3211     // operand is already const, we'll ignore the right operand's constness.
3212     if (leftChild->isInt32Constant())
3213         leftOperand.setConstInt32(leftChild->asInt32());
3214     else if (rightChild->isInt32Constant())
3215         rightOperand.setConstInt32(rightChild->asInt32());
3216
3217     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3218
3219     if (!leftOperand.isConst()) {
3220         left.emplace(this, leftChild);
3221         leftRegs = left->jsValueRegs();
3222     }
3223     if (!rightOperand.isConst()) {
3224         right.emplace(this, rightChild);
3225         rightRegs = right->jsValueRegs();
3226     }
3227
3228     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3229     gen.generateFastPath(m_jit);
3230
3231     ASSERT(gen.didEmitFastPath());
3232     gen.endJumpList().append(m_jit.jump());
3233
3234     gen.slowPathJumpList().link(&m_jit);
3235     silentSpillAllRegisters(resultRegs);
3236
3237     if (leftOperand.isConst()) {
3238         leftRegs = resultRegs;
3239         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3240     } else if (rightOperand.isConst()) {
3241         rightRegs = resultRegs;
3242         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3243     }
3244
3245     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3246
3247     silentFillAllRegisters(resultRegs);
3248     m_jit.exceptionCheck();
3249
3250     gen.endJumpList().link(&m_jit);
3251     jsValueResult(resultRegs, node);
3252 }
3253
3254 void SpeculativeJIT::compileBitwiseOp(Node* node)
3255 {
3256     NodeType op = node->op();
3257     Edge& leftChild = node->child1();
3258     Edge& rightChild = node->child2();
3259
3260     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3261         switch (op) {
3262         case BitAnd:
3263             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3264             return;
3265         case BitOr:
3266             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3267             return;
3268         case BitXor:
3269             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3270             return;
3271         default:
3272             RELEASE_ASSERT_NOT_REACHED();
3273         }
3274     }
3275
3276     if (leftChild->isInt32Constant()) {
3277         SpeculateInt32Operand op2(this, rightChild);
3278         GPRTemporary result(this, Reuse, op2);
3279
3280         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3281
3282         int32Result(result.gpr(), node);
3283
3284     } else if (rightChild->isInt32Constant()) {
3285         SpeculateInt32Operand op1(this, leftChild);
3286         GPRTemporary result(this, Reuse, op1);
3287
3288         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3289
3290         int32Result(result.gpr(), node);
3291
3292     } else {
3293         SpeculateInt32Operand op1(this, leftChild);
3294         SpeculateInt32Operand op2(this, rightChild);
3295         GPRTemporary result(this, Reuse, op1, op2);
3296         
3297         GPRReg reg1 = op1.gpr();
3298         GPRReg reg2 = op2.gpr();
3299         bitOp(op, reg1, reg2, result.gpr());
3300         
3301         int32Result(result.gpr(), node);
3302     }
3303 }
3304
3305 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3306 {
3307     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3308         ? operationValueBitRShift : operationValueBitURShift;
3309     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3310         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3311
3312     Edge& leftChild = node->child1();
3313     Edge& rightChild = node->child2();
3314
3315     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3316         JSValueOperand left(this, leftChild);
3317         JSValueOperand right(this, rightChild);
3318         JSValueRegs leftRegs = left.jsValueRegs();
3319         JSValueRegs rightRegs = right.jsValueRegs();
3320 #if USE(JSVALUE64)
3321         GPRTemporary result(this);
3322         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3323 #else
3324         GPRTemporary resultTag(this);
3325         GPRTemporary resultPayload(this);
3326         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3327 #endif
3328         flushRegisters();
3329         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3330         m_jit.exceptionCheck();
3331
3332         jsValueResult(resultRegs, node);
3333         return;
3334     }
3335
3336     std::optional<JSValueOperand> left;
3337     std::optional<JSValueOperand> right;
3338
3339     JSValueRegs leftRegs;
3340     JSValueRegs rightRegs;
3341
3342     FPRTemporary leftNumber(this);
3343     FPRReg leftFPR = leftNumber.fpr();
3344
3345 #if USE(JSVALUE64)
3346     GPRTemporary result(this);
3347     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3348     GPRTemporary scratch(this);
3349     GPRReg scratchGPR = scratch.gpr();
3350     FPRReg scratchFPR = InvalidFPRReg;
3351 #else
3352     GPRTemporary resultTag(this);
3353     GPRTemporary resultPayload(this);
3354     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3355     GPRReg scratchGPR = resultTag.gpr();
3356     FPRTemporary fprScratch(this);
3357     FPRReg scratchFPR = fprScratch.fpr();
3358 #endif
3359
3360     SnippetOperand leftOperand;
3361     SnippetOperand rightOperand;
3362
3363     // The snippet generator does not support both operands being constant. If the left
3364     // operand is already const, we'll ignore the right operand's constness.
3365     if (leftChild->isInt32Constant())
3366         leftOperand.setConstInt32(leftChild->asInt32());
3367     else if (rightChild->isInt32Constant())
3368         rightOperand.setConstInt32(rightChild->asInt32());
3369
3370     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3371
3372     if (!leftOperand.isConst()) {
3373         left.emplace(this, leftChild);
3374         leftRegs = left->jsValueRegs();
3375     }
3376     if (!rightOperand.isConst()) {
3377         right.emplace(this, rightChild);
3378         rightRegs = right->jsValueRegs();
3379     }
3380
3381     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3382         leftFPR, scratchGPR, scratchFPR, shiftType);
3383     gen.generateFastPath(m_jit);
3384
3385     ASSERT(gen.didEmitFastPath());
3386     gen.endJumpList().append(m_jit.jump());
3387
3388     gen.slowPathJumpList().link(&m_jit);
3389     silentSpillAllRegisters(resultRegs);
3390
3391     if (leftOperand.isConst()) {
3392         leftRegs = resultRegs;
3393         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3394     } else if (rightOperand.isConst()) {
3395         rightRegs = resultRegs;
3396         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3397     }
3398
3399     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3400
3401     silentFillAllRegisters(resultRegs);
3402     m_jit.exceptionCheck();
3403
3404     gen.endJumpList().link(&m_jit);
3405     jsValueResult(resultRegs, node);
3406     return;
3407 }
3408
3409 void SpeculativeJIT::compileShiftOp(Node* node)
3410 {
3411     NodeType op = node->op();
3412     Edge& leftChild = node->child1();
3413     Edge& rightChild = node->child2();
3414
3415     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3416         switch (op) {
3417         case BitLShift:
3418             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3419             return;
3420         case BitRShift:
3421         case BitURShift:
3422             emitUntypedRightShiftBitOp(node);
3423             return;
3424         default:
3425             RELEASE_ASSERT_NOT_REACHED();
3426         }
3427     }
3428
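    // ECMAScript takes shift counts modulo 32, hence the "& 0x1f" when the amount is a constant.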
3429     if (rightChild->isInt32Constant()) {
3430         SpeculateInt32Operand op1(this, leftChild);
3431         GPRTemporary result(this, Reuse, op1);
3432
3433         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3434
3435         int32Result(result.gpr(), node);
3436     } else {
3437         // Do not allow the shift amount to be used as the result; the MacroAssembler does not permit this.
3438         SpeculateInt32Operand op1(this, leftChild);
3439         SpeculateInt32Operand op2(this, rightChild);
3440         GPRTemporary result(this, Reuse, op1);
3441
3442         GPRReg reg1 = op1.gpr();
3443         GPRReg reg2 = op2.gpr();
3444         shiftOp(op, reg1, reg2, result.gpr());
3445
3446         int32Result(result.gpr(), node);
3447     }
3448 }
3449
3450 void SpeculativeJIT::compileValueAdd(Node* node)
3451 {
3452     Edge& leftChild = node->child1();
3453     Edge& rightChild = node->child2();
3454
3455     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3456         JSValueOperand left(this, leftChild);
3457         JSValueOperand right(this, rightChild);
3458         JSValueRegs leftRegs = left.jsValueRegs();
3459         JSValueRegs rightRegs = right.jsValueRegs();
3460 #if USE(JSVALUE64)
3461         GPRTemporary result(this);
3462         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3463 #else
3464         GPRTemporary resultTag(this);
3465         GPRTemporary resultPayload(this);
3466         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3467 #endif
3468         flushRegisters();
3469         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3470         m_jit.exceptionCheck();
3471     
3472         jsValueResult(resultRegs, node);
3473         return;
3474     }
3475
3476 #if USE(JSVALUE64)
3477     bool needsScratchGPRReg = true;
3478     bool needsScratchFPRReg = false;
3479 #else
3480     bool needsScratchGPRReg = true;
3481     bool needsScratchFPRReg = true;
3482 #endif
3483
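    // The generic add goes through a binary math IC seeded with the baseline ArithProfile.
    // compileMathIC emits the IC's inline fast path and wires up a repatching slow-path call
    // (operationValueAddOptimize) plus a plain fallback (operationValueAdd).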
3484     ArithProfile* arithProfile = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
3485     JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile);
3486     auto repatchingFunction = operationValueAddOptimize;
3487     auto nonRepatchingFunction = operationValueAdd;
3488     
3489     compileMathIC(node, addIC, needsScratchGPRReg, needsScratchFPRReg, repatchingFunction, nonRepatchingFunction);
3490 }
3491
3492 template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
3493 void SpeculativeJIT::compileMathIC(Node* node, JITBinaryMathIC<Generator>* mathIC, bool needsScratchGPRReg, bool needsScratchFPRReg, RepatchingFunction repatchingFunction, NonRepatchingFunction nonRepatchingFunction)
3494 {
3495     Edge& leftChild = node->child1();
3496     Edge& rightChild = node->child2();
3497
3498     std::optional<JSValueOperand> left;
3499     std::optional<JSValueOperand> right;
3500
3501     JSValueRegs leftRegs;
3502     JSValueRegs rightRegs;
3503
3504     FPRTemporary leftNumber(this);
3505     FPRTemporary rightNumber(this);
3506     FPRReg leftFPR = leftNumber.fpr();
3507     FPRReg rightFPR = rightNumber.fpr();
3508
3509     GPRReg scratchGPR = InvalidGPRReg;
3510     FPRReg scratchFPR = InvalidFPRReg;
3511
3512     std::optional<FPRTemporary> fprScratch;
3513     if (needsScratchFPRReg) {
3514         fprScratch.emplace(this);
3515         scratchFPR = fprScratch->fpr();
3516     }
3517
3518 #if USE(JSVALUE64)
3519     std::optional<GPRTemporary> gprScratch;
3520     if (needsScratchGPRReg) {
3521         gprScratch.emplace(this);
3522         scratchGPR = gprScratch->gpr();
3523     }
3524     GPRTemporary result(this);
3525     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3526 #else
3527     GPRTemporary resultTag(this);
3528     GPRTemporary resultPayload(this);
3529     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3530     if (needsScratchGPRReg)
3531         scratchGPR = resultRegs.tagGPR();
3532 #endif
3533
3534     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3535     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3536
3537     // The snippet generator does not support both operands being constant. If the left
3538     // operand is already const, we'll ignore the right operand's constness.
3539     if (leftChild->isInt32Constant())
3540         leftOperand.setConstInt32(leftChild->asInt32());
3541     else if (rightChild->isInt32Constant())
3542         rightOperand.setConstInt32(rightChild->asInt32());
3543
3544     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3545     ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));
3546
3547     if (!Generator::isLeftOperandValidConstant(leftOperand)) {
3548         left.emplace(this, leftChild);
3549         leftRegs = left->jsValueRegs();
3550     }
3551     if (!Generator::isRightOperandValidConstant(rightOperand)) {
3552         right.emplace(this, rightChild);
3553         rightRegs = right->jsValueRegs();
3554     }
3555
3556 #if ENABLE(MATH_IC_STATS)
3557     auto inlineStart = m_jit.label();
3558 #endif
3559
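    // Ask the IC to emit its inline fast path. If it declines (generateInline returns false),
    // we skip the IC machinery entirely and just call the non-repatching operation.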
3560     Box<MathICGenerationState> addICGenerationState = Box<MathICGenerationState>::create();
3561     mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, leftFPR, rightFPR, scratchGPR, scratchFPR);
3562
3563     bool shouldEmitProfiling = false;
3564     bool generatedInline = mathIC->generateInline(m_jit, *addICGenerationState, shouldEmitProfiling);
3565
3566     if (generatedInline) {
3567         ASSERT(!addICGenerationState->slowPathJumps.empty());
3568
3569         Vector<SilentRegisterSavePlan> savePlans;
3570         silentSpillAllRegistersImpl(false, savePlans, resultRegs);
3571
3572         auto done = m_jit.label();
3573
3574         addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
3575             addICGenerationState->slowPathJumps.link(&m_jit);
3576             addICGenerationState->slowPathStart = m_jit.label();
3577 #if ENABLE(MATH_IC_STATS)
3578             auto slowPathStart = m_jit.label();
3579 #endif
3580
3581             silentSpill(savePlans);
3582
3583             auto innerLeftRegs = leftRegs;
3584             auto innerRightRegs = rightRegs;
3585             if (Generator::isLeftOperandValidConstant(leftOperand)) {
3586                 innerLeftRegs = resultRegs;
3587                 m_jit.moveValue(leftChild->asJSValue(), innerLeftRegs);
3588             } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3589                 innerRightRegs = resultRegs;
3590                 m_jit.moveValue(rightChild->asJSValue(), innerRightRegs);
3591             }
3592
3593             if (addICGenerationState->shouldSlowPathRepatch)
3594                 addICGenerationState->slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJMic>(repatchingFunction), resultRegs, innerLeftRegs, innerRightRegs, TrustedImmPtr(mathIC));
3595             else
3596                 addICGenerationState->slowPathCall = callOperation(nonRepatchingFunction, resultRegs, innerLeftRegs, innerRightRegs);
3597
3598             silentFill(savePlans);
3599             m_jit.exceptionCheck();
3600             m_jit.jump().linkTo(done, &m_jit);
3601
3602             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3603                 mathIC->finalizeInlineCode(*addICGenerationState, linkBuffer);
3604             });
3605
3606 #if ENABLE(MATH_IC_STATS)
3607             auto slowPathEnd = m_jit.label();
3608             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3609                 size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
3610                 mathIC->m_generatedCodeSize += size;
3611             });
3612 #endif
3613
3614         });
3615     } else {
3616         if (Generator::isLeftOperandValidConstant(leftOperand)) {
3617             left.emplace(this, leftChild);
3618             leftRegs = left->jsValueRegs();
3619         } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3620             right.emplace(this, rightChild);
3621             rightRegs = right->jsValueRegs();
3622         }
3623
3624         flushRegisters();
3625         callOperation(nonRepatchingFunction, resultRegs, leftRegs, rightRegs);
3626         m_jit.exceptionCheck();
3627     }
3628
3629 #if ENABLE(MATH_IC_STATS)
3630     auto inlineEnd = m_jit.label();
3631     m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3632         size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
3633         mathIC->m_generatedCodeSize += size;
3634     });
3635 #endif
3636
3637     jsValueResult(resultRegs, node);
3638     return;
3639 }
3640
3641 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3642 {
3643     // We could do something smarter here, but this case is currently super rare and, unless
3644     // Symbol.hasInstance becomes popular, it will likely remain that way.
3645
3646     JSValueOperand value(this, node->child1());
3647     SpeculateCellOperand constructor(this, node->child2());
3648     JSValueOperand hasInstanceValue(this, node->child3());
3649     GPRTemporary result(this);
3650
3651     JSValueRegs valueRegs = value.jsValueRegs();
3652     GPRReg constructorGPR = constructor.gpr();
3653     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3654     GPRReg resultGPR = result.gpr();
3655
3656     MacroAssembler::Jump slowCase = m_jit.jump();
3657
3658     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3659
3660     unblessedBooleanResult(resultGPR, node);
3661 }
3662
3663 void SpeculativeJIT::compileIsCellWithType(Node* node)
3664 {
3665     switch (node->child1().useKind()) {
3666     case UntypedUse: {
3667         JSValueOperand value(this, node->child1());
3668 #if USE(JSVALUE64)
3669         GPRTemporary result(this, Reuse, value);
3670 #else
3671         GPRTemporary result(this, Reuse, value, PayloadWord);
3672 #endif
3673
3674         JSValueRegs valueRegs = value.jsValueRegs();
3675         GPRReg resultGPR = result.gpr();
3676
3677         JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3678
3679         m_jit.compare8(JITCompiler::Equal,
3680             JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3681             TrustedImm32(node->queriedType()),
3682             resultGPR);
3683         blessBoolean(resultGPR);
3684         JITCompiler::Jump done = m_jit.jump();
3685
3686         isNotCell.link(&m_jit);
3687         moveFalseTo(resultGPR);
3688
3689         done.link(&m_jit);
3690         blessedBooleanResult(resultGPR, node);
3691         return;
3692     }
3693
3694     case CellUse: {
3695         SpeculateCellOperand cell(this, node->child1());
3696         GPRTemporary result(this, Reuse, cell);
3697
3698         GPRReg cellGPR = cell.gpr();
3699         GPRReg resultGPR = result.gpr();
3700
3701         m_jit.compare8(JITCompiler::Equal,
3702             JITCompiler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
3703             TrustedImm32(node->queriedType()),
3704             resultGPR);
3705         blessBoolean(resultGPR);
3706         blessedBooleanResult(resultGPR, node);
3707         return;
3708     }
3709
3710     default:
3711         RELEASE_ASSERT_NOT_REACHED();
3712         break;
3713     }
3714 }
3715
3716 void SpeculativeJIT::compileIsTypedArrayView(Node* node)
3717 {
3718     JSValueOperand value(this, node->child1());
3719 #if USE(JSVALUE64)
3720     GPRTemporary result(this, Reuse, value);
3721 #else
3722     GPRTemporary result(this, Reuse, value, PayloadWord);
3723 #endif
3724
3725     JSValueRegs valueRegs = value.jsValueRegs();
3726     GPRReg resultGPR = result.gpr();
3727
3728     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3729
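    // Branch-free range check. The typed array view JSTypes are contiguous, so roughly:
    //     isView = unsigned(type - Int8ArrayType) <= (Float64ArrayType - Int8ArrayType);
    // with the comparison done unsigned so that types below Int8ArrayType wrap around and fail.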
3730     m_jit.load8(JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()), resultGPR);
3731     m_jit.sub32(TrustedImm32(Int8ArrayType), resultGPR);
3732     m_jit.compare32(JITCompiler::BelowOrEqual,
3733         resultGPR,
3734         TrustedImm32(Float64ArrayType - Int8ArrayType),
3735         resultGPR);
3736     blessBoolean(resultGPR);
3737     JITCompiler::Jump done = m_jit.jump();
3738
3739     isNotCell.link(&m_jit);
3740     moveFalseTo(resultGPR);
3741
3742     done.link(&m_jit);
3743     blessedBooleanResult(resultGPR, node);
3744 }
3745
3746 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3747 {
3748     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3749     JSValueOperand value(this, node->child1());
3750 #if USE(JSVALUE64)
3751     GPRTemporary result(this, Reuse, value);
3752 #else
3753     GPRTemporary result(this, Reuse, value, PayloadWord);
3754 #endif
3755
3756     JSValueRegs valueRegs = value.jsValueRegs();
3757     GPRReg resultGPR = result.gpr();
3758
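    // Object(value): objects pass through unchanged; non-cells and non-object cells take the
    // slow path, where operationObjectConstructor materializes the result (e.g. a wrapper
    // object for primitives).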
3759     MacroAssembler::JumpList slowCases;
3760     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3761     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3762     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3763
3764     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3765     cellResult(resultGPR, node);
3766 }
3767
3768 void SpeculativeJIT::compileArithAdd(Node* node)
3769 {
3770     switch (node->binaryUseKind()) {
3771     case Int32Use: {
3772         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3773
3774         if (node->child2()->isInt32Constant()) {
3775             SpeculateInt32Operand op1(this, node->child1());
3776             GPRTemporary result(this, Reuse, op1);
3777
3778             GPRReg gpr1 = op1.gpr();
3779             int32_t imm2 = node->child2()->asInt32();
3780             GPRReg gprResult = result.gpr();
3781
3782             if (!shouldCheckOverflow(node->arithMode())) {
3783                 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3784                 int32Result(gprResult, node);
3785                 return;
3786             }
3787
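            // Checked add against a constant. If the result register aliases the operand,
            // attach a SpeculationRecovery so that an OSR exit can undo the add and recover
            // the original operand value.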
3788             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
3789             if (gpr1 == gprResult) {
3790                 speculationCheck(Overflow, JSValueRegs(), 0, check,
3791                     SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
3792             } else
3793                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3794
3795             int32Result(gprResult, node);
3796             return;
3797         }
3798                 
3799         SpeculateInt32Operand op1(this, node->child1());
3800         SpeculateInt32Operand op2(this, node->child2());
3801         GPRTemporary result(this, Reuse, op1, op2);
3802
3803         GPRReg gpr1 = op1.gpr();
3804         GPRReg gpr2 = op2.gpr();
3805         GPRReg gprResult = result.gpr();
3806
3807         if (!shouldCheckOverflow(node->arithMode()))
3808             m_jit.add32(gpr1, gpr2, gprResult);
3809         else {
3810             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3811                 
3812             if (gpr1 == gprResult)
3813                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3814             else if (gpr2 == gprResult)
3815                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3816             else
3817                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3818         }
3819
3820         int32Result(gprResult, node);
3821         return;
3822     }
3823         
3824 #if USE(JSVALUE64)
3825     case Int52RepUse: {
3826         ASSERT(shouldCheckOverflow(node->arithMode()));
3827         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3828
3829         // Will we need an overflow check? If we can prove that neither input can be
3830         // Int52 then the overflow check will not be necessary.
3831         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3832             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3833             SpeculateWhicheverInt52Operand op1(this, node->child1());
3834             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3835             GPRTemporary result(this, Reuse, op1);
3836             m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
3837             int52Result(result.gpr(), node, op1.format());
3838             return;
3839         }
3840         
3841         SpeculateInt52Operand op1(this, node->child1());
3842         SpeculateInt52Operand op2(this, node->child2());
3843         GPRTemporary result(this);
3844         m_jit.move(op1.gpr(), result.gpr());
3845         speculationCheck(
3846             Int52Overflow, JSValueRegs(), 0,
3847             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3848         int52Result(result.gpr(), node);
3849         return;
3850     }
3851 #endif // USE(JSVALUE64)
3852     
3853     case DoubleRepUse: {
3854         SpeculateDoubleOperand op1(this, node->child1());
3855         SpeculateDoubleOperand op2(this, node->child2());
3856         FPRTemporary result(this, op1, op2);
3857
3858         FPRReg reg1 = op1.fpr();
3859         FPRReg reg2 = op2.fpr();
3860         m_jit.addDouble(reg1, reg2, result.fpr());
3861
3862         doubleResult(result.fpr(), node);
3863         return;
3864     }
3865         
3866     default:
3867         RELEASE_ASSERT_NOT_REACHED();
3868         break;
3869     }
3870 }
3871
3872 void SpeculativeJIT::compileMakeRope(Node* node)
3873 {
3874     ASSERT(node->child1().useKind() == KnownStringUse);
3875     ASSERT(node->child2().useKind() == KnownStringUse);
3876     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3877     
3878     SpeculateCellOperand op1(this, node->child1());
3879     SpeculateCellOperand op2(this, node->child2());
3880     SpeculateCellOperand op3(this, node->child3());
3881     GPRTemporary result(this);
3882     GPRTemporary allocator(this);
3883     GPRTemporary scratch(this);
3884     
3885     GPRReg opGPRs[3];
3886     unsigned numOpGPRs;
3887     opGPRs[0] = op1.gpr();
3888     opGPRs[1] = op2.gpr();
3889     if (node->child3()) {
3890     &nbs