[JSC] Add a new byte code op_define_property instead of calling defineProperty
[WebKit-https.git] / Source / JavaScriptCore / dfg / DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGCapabilities.h"
37 #include "DFGMayExit.h"
38 #include "DFGOSRExitFuzz.h"
39 #include "DFGSaneStringGetByValSlowPathGenerator.h"
40 #include "DFGSlowPathGenerator.h"
41 #include "DirectArguments.h"
42 #include "JITAddGenerator.h"
43 #include "JITBitAndGenerator.h"
44 #include "JITBitOrGenerator.h"
45 #include "JITBitXorGenerator.h"
46 #include "JITDivGenerator.h"
47 #include "JITLeftShiftGenerator.h"
48 #include "JITMulGenerator.h"
49 #include "JITRightShiftGenerator.h"
50 #include "JITSubGenerator.h"
51 #include "JSCInlines.h"
52 #include "JSEnvironmentRecord.h"
53 #include "JSGeneratorFunction.h"
54 #include "JSLexicalEnvironment.h"
55 #include "LinkBuffer.h"
56 #include "RegExpConstructor.h"
57 #include "ScopedArguments.h"
58 #include "ScratchRegisterAllocator.h"
59 #include "WriteBarrierBuffer.h"
60 #include <wtf/Box.h>
61 #include <wtf/MathExtras.h>
62
63 namespace JSC { namespace DFG {
64
65 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
66     : m_compileOkay(true)
67     , m_jit(jit)
68     , m_currentNode(0)
69     , m_lastGeneratedNode(LastNodeType)
70     , m_indexInBlock(0)
71     , m_generationInfo(m_jit.graph().frameRegisterCount())
72     , m_state(m_jit.graph())
73     , m_interpreter(m_jit.graph(), m_state)
74     , m_stream(&jit.jitCode()->variableEventStream)
75     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
76 {
77 }
78
79 SpeculativeJIT::~SpeculativeJIT()
80 {
81 }
82
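// Inline-allocates a JSFinalObject with the given structure together with any
// out-of-line/indexed butterfly storage it needs. Unused vector slots (from
// numElements up to vectorLength) are filled with the hole value (PNaN for double
// arrays, the empty JSValue otherwise), and allocation failures fall back to
// operationNewRawObject through a slow path generator.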
83 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
84 {
85     IndexingType indexingType = structure->indexingType();
86     bool hasIndexingHeader = hasIndexedProperties(indexingType);
87
88     unsigned inlineCapacity = structure->inlineCapacity();
89     unsigned outOfLineCapacity = structure->outOfLineCapacity();
90     
91     GPRTemporary scratch(this);
92     GPRTemporary scratch2(this);
93     GPRReg scratchGPR = scratch.gpr();
94     GPRReg scratch2GPR = scratch2.gpr();
95
96     ASSERT(vectorLength >= numElements);
97     vectorLength = Butterfly::optimalContiguousVectorLength(structure, vectorLength);
98     
99     JITCompiler::JumpList slowCases;
100
101     size_t size = 0;
102     if (hasIndexingHeader)
103         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
104     size += outOfLineCapacity * sizeof(JSValue);
105
106     m_jit.move(TrustedImmPtr(0), storageGPR);
107     
108     if (size) {
109         if (MarkedAllocator* allocator = m_jit.vm()->heap.allocatorForAuxiliaryData(size)) {
110             m_jit.move(TrustedImmPtr(allocator), scratchGPR);
111             m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);
112             
113             m_jit.addPtr(
114                 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
115                 storageGPR);
116             
117             if (hasIndexingHeader)
118                 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
119         } else
120             slowCases.append(m_jit.jump());
121     }
122
123     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
124     MarkedAllocator* allocatorPtr = m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
125     if (allocatorPtr) {
126         m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
127         emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
128     } else
129         slowCases.append(m_jit.jump());
130
131     // I want a slow path that also loads out the storage pointer, and that's
132     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
133     // of work for a very small piece of functionality. :-/
134     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
135         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
136         structure, vectorLength));
137
138     if (numElements < vectorLength) {
139 #if USE(JSVALUE64)
140         if (hasDouble(structure->indexingType()))
141             m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
142         else
143             m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
144         for (unsigned i = numElements; i < vectorLength; ++i)
145             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
146 #else
147         EncodedValueDescriptor value;
148         if (hasDouble(structure->indexingType()))
149             value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
150         else
151             value.asInt64 = JSValue::encode(JSValue());
152         for (unsigned i = numElements; i < vectorLength; ++i) {
153             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
154             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
155         }
156 #endif
157     }
158     
159     if (hasIndexingHeader)
160         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
161 }
162
163 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
164 {
165     if (inlineCallFrame && !inlineCallFrame->isVarargs())
166         m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
167     else {
168         VirtualRegister argumentCountRegister;
169         if (!inlineCallFrame)
170             argumentCountRegister = VirtualRegister(CallFrameSlot::argumentCount);
171         else
172             argumentCountRegister = inlineCallFrame->argumentCountRegister;
173         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
174         if (!includeThis)
175             m_jit.sub32(TrustedImm32(1), lengthGPR);
176     }
177 }
178
179 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
180 {
181     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
182 }
183
184 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
185 {
186     if (origin.inlineCallFrame) {
187         if (origin.inlineCallFrame->isClosureCall) {
188             m_jit.loadPtr(
189                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
190                 calleeGPR);
191         } else {
192             m_jit.move(
193                 TrustedImmPtr(origin.inlineCallFrame->calleeRecovery.constant().asCell()),
194                 calleeGPR);
195         }
196     } else
197         m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
198 }
199
200 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
201 {
202     m_jit.addPtr(
203         TrustedImm32(
204             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
205         GPRInfo::callFrameRegister, startGPR);
206 }
207
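// When OSR exit fuzzing is enabled, bumps the global fuzz-check counter and returns
// a jump that fires once the configured threshold is reached; otherwise returns an
// unset jump so callers can tell that no fuzz exit was emitted.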
208 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
209 {
210     if (!Options::useOSRExitFuzz()
211         || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
212         || !doOSRExitFuzzing())
213         return MacroAssembler::Jump();
214     
215     MacroAssembler::Jump result;
216     
217     m_jit.pushToSave(GPRInfo::regT0);
218     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
219     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
220     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
221     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
222     unsigned at = Options::fireOSRExitFuzzAt();
223     if (at || atOrAfter) {
224         unsigned threshold;
225         MacroAssembler::RelationalCondition condition;
226         if (atOrAfter) {
227             threshold = atOrAfter;
228             condition = MacroAssembler::Below;
229         } else {
230             threshold = at;
231             condition = MacroAssembler::NotEqual;
232         }
233         MacroAssembler::Jump ok = m_jit.branch32(
234             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
235         m_jit.popToRestore(GPRInfo::regT0);
236         result = m_jit.jump();
237         ok.link(&m_jit);
238     }
239     m_jit.popToRestore(GPRInfo::regT0);
240     
241     return result;
242 }
243
244 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
245 {
246     if (!m_compileOkay)
247         return;
248     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
249     if (fuzzJump.isSet()) {
250         JITCompiler::JumpList jumpsToFail;
251         jumpsToFail.append(fuzzJump);
252         jumpsToFail.append(jumpToFail);
253         m_jit.appendExitInfo(jumpsToFail);
254     } else
255         m_jit.appendExitInfo(jumpToFail);
256     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
257 }
258
259 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
260 {
261     if (!m_compileOkay)
262         return;
263     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
264     if (fuzzJump.isSet()) {
265         JITCompiler::JumpList myJumpsToFail;
266         myJumpsToFail.append(jumpsToFail);
267         myJumpsToFail.append(fuzzJump);
268         m_jit.appendExitInfo(myJumpsToFail);
269     } else
270         m_jit.appendExitInfo(jumpsToFail);
271     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
272 }
273
274 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
275 {
276     if (!m_compileOkay)
277         return OSRExitJumpPlaceholder();
278     unsigned index = m_jit.jitCode()->osrExit.size();
279     m_jit.appendExitInfo();
280     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
281     return OSRExitJumpPlaceholder(index);
282 }
283
284 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
285 {
286     return speculationCheck(kind, jsValueSource, nodeUse.node());
287 }
288
289 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
290 {
291     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
292 }
293
294 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
295 {
296     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
297 }
298
299 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
300 {
301     if (!m_compileOkay)
302         return;
303     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
304     m_jit.appendExitInfo(jumpToFail);
305     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
306 }
307
308 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
309 {
310     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
311 }
312
313 void SpeculativeJIT::emitInvalidationPoint(Node* node)
314 {
315     if (!m_compileOkay)
316         return;
317     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
318     m_jit.jitCode()->appendOSRExit(OSRExit(
319         UncountableInvalidation, JSValueSource(),
320         m_jit.graph().methodOfGettingAValueProfileFor(node),
321         this, m_stream->size()));
322     info.m_replacementSource = m_jit.watchpointLabel();
323     ASSERT(info.m_replacementSource.isSet());
324     noResult(node);
325 }
326
327 void SpeculativeJIT::unreachable(Node* node)
328 {
329     m_compileOkay = false;
330     m_jit.abortWithReason(DFGUnreachableNode, node->op());
331 }
332
333 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
334 {
335     if (!m_compileOkay)
336         return;
337     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
338     m_compileOkay = false;
339     if (verboseCompilationEnabled())
340         dataLog("Bailing compilation.\n");
341 }
342
343 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
344 {
345     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
346 }
347
348 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
349 {
350     ASSERT(needsTypeCheck(edge, typesPassedThrough));
351     m_interpreter.filter(edge, typesPassedThrough);
352     speculationCheck(exitKind, source, edge.node(), jumpToFail);
353 }
354
355 RegisterSet SpeculativeJIT::usedRegisters()
356 {
357     RegisterSet result;
358     
359     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
360         GPRReg gpr = GPRInfo::toRegister(i);
361         if (m_gprs.isInUse(gpr))
362             result.set(gpr);
363     }
364     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
365         FPRReg fpr = FPRInfo::toRegister(i);
366         if (m_fprs.isInUse(fpr))
367             result.set(fpr);
368     }
369     
370     result.merge(RegisterSet::stubUnavailableRegisters());
371     
372     return result;
373 }
374
375 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
376 {
377     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
378 }
379
380 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
381 {
382     m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
383 }
384
385 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
386 {
387     for (auto& slowPathGenerator : m_slowPathGenerators) {
388         pcToCodeOriginMapBuilder.appendItem(m_jit.label(), slowPathGenerator->origin().semantic);
389         slowPathGenerator->generate(this);
390     }
391     for (auto& slowPathLambda : m_slowPathLambdas) {
392         Node* currentNode = slowPathLambda.currentNode;
393         m_currentNode = currentNode;
394         m_outOfLineStreamIndex = slowPathLambda.streamIndex;
395         pcToCodeOriginMapBuilder.appendItem(m_jit.label(), currentNode->origin.semantic);
396         slowPathLambda.generator();
397         m_outOfLineStreamIndex = Nullopt;
398     }
399 }
400
401 void SpeculativeJIT::clearGenerationInfo()
402 {
403     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
404         m_generationInfo[i] = GenerationInfo();
405     m_gprs = RegisterBank<GPRInfo>();
406     m_fprs = RegisterBank<FPRInfo>();
407 }
408
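// Builds the spill/fill plan for a GPR that has to survive a call without going
// through the normal spill path: the plan records how to store the current value
// (if a spill is needed at all) and how to reconstitute it afterwards, preferring
// constant rematerialization over reloads where possible.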
409 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
410 {
411     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
412     Node* node = info.node();
413     DataFormat registerFormat = info.registerFormat();
414     ASSERT(registerFormat != DataFormatNone);
415     ASSERT(registerFormat != DataFormatDouble);
416         
417     SilentSpillAction spillAction;
418     SilentFillAction fillAction;
419         
420     if (!info.needsSpill())
421         spillAction = DoNothingForSpill;
422     else {
423 #if USE(JSVALUE64)
424         ASSERT(info.gpr() == source);
425         if (registerFormat == DataFormatInt32)
426             spillAction = Store32Payload;
427         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
428             spillAction = StorePtr;
429         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
430             spillAction = Store64;
431         else {
432             ASSERT(registerFormat & DataFormatJS);
433             spillAction = Store64;
434         }
435 #elif USE(JSVALUE32_64)
436         if (registerFormat & DataFormatJS) {
437             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
438             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
439         } else {
440             ASSERT(info.gpr() == source);
441             spillAction = Store32Payload;
442         }
443 #endif
444     }
445         
446     if (registerFormat == DataFormatInt32) {
447         ASSERT(info.gpr() == source);
448         ASSERT(isJSInt32(info.registerFormat()));
449         if (node->hasConstant()) {
450             ASSERT(node->isInt32Constant());
451             fillAction = SetInt32Constant;
452         } else
453             fillAction = Load32Payload;
454     } else if (registerFormat == DataFormatBoolean) {
455 #if USE(JSVALUE64)
456         RELEASE_ASSERT_NOT_REACHED();
457 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
458         fillAction = DoNothingForFill;
459 #endif
460 #elif USE(JSVALUE32_64)
461         ASSERT(info.gpr() == source);
462         if (node->hasConstant()) {
463             ASSERT(node->isBooleanConstant());
464             fillAction = SetBooleanConstant;
465         } else
466             fillAction = Load32Payload;
467 #endif
468     } else if (registerFormat == DataFormatCell) {
469         ASSERT(info.gpr() == source);
470         if (node->hasConstant()) {
471             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
472             node->asCell(); // To get the assertion.
473             fillAction = SetCellConstant;
474         } else {
475 #if USE(JSVALUE64)
476             fillAction = LoadPtr;
477 #else
478             fillAction = Load32Payload;
479 #endif
480         }
481     } else if (registerFormat == DataFormatStorage) {
482         ASSERT(info.gpr() == source);
483         fillAction = LoadPtr;
484     } else if (registerFormat == DataFormatInt52) {
485         if (node->hasConstant())
486             fillAction = SetInt52Constant;
487         else if (info.spillFormat() == DataFormatInt52)
488             fillAction = Load64;
489         else if (info.spillFormat() == DataFormatStrictInt52)
490             fillAction = Load64ShiftInt52Left;
491         else if (info.spillFormat() == DataFormatNone)
492             fillAction = Load64;
493         else {
494             RELEASE_ASSERT_NOT_REACHED();
495 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
496             fillAction = Load64; // Make GCC happy.
497 #endif
498         }
499     } else if (registerFormat == DataFormatStrictInt52) {
500         if (node->hasConstant())
501             fillAction = SetStrictInt52Constant;
502         else if (info.spillFormat() == DataFormatInt52)
503             fillAction = Load64ShiftInt52Right;
504         else if (info.spillFormat() == DataFormatStrictInt52)
505             fillAction = Load64;
506         else if (info.spillFormat() == DataFormatNone)
507             fillAction = Load64;
508         else {
509             RELEASE_ASSERT_NOT_REACHED();
510 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
511             fillAction = Load64; // Make GCC happy.
512 #endif
513         }
514     } else {
515         ASSERT(registerFormat & DataFormatJS);
516 #if USE(JSVALUE64)
517         ASSERT(info.gpr() == source);
518         if (node->hasConstant()) {
519             if (node->isCellConstant())
520                 fillAction = SetTrustedJSConstant;
521             else
522                 fillAction = SetJSConstant;
523         } else if (info.spillFormat() == DataFormatInt32) {
524             ASSERT(registerFormat == DataFormatJSInt32);
525             fillAction = Load32PayloadBoxInt;
526         } else
527             fillAction = Load64;
528 #else
529         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
530         if (node->hasConstant())
531             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
532         else if (info.payloadGPR() == source)
533             fillAction = Load32Payload;
534         else { // Fill the Tag
535             switch (info.spillFormat()) {
536             case DataFormatInt32:
537                 ASSERT(registerFormat == DataFormatJSInt32);
538                 fillAction = SetInt32Tag;
539                 break;
540             case DataFormatCell:
541                 ASSERT(registerFormat == DataFormatJSCell);
542                 fillAction = SetCellTag;
543                 break;
544             case DataFormatBoolean:
545                 ASSERT(registerFormat == DataFormatJSBoolean);
546                 fillAction = SetBooleanTag;
547                 break;
548             default:
549                 fillAction = Load32Tag;
550                 break;
551             }
552         }
553 #endif
554     }
555         
556     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
557 }
558     
559 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
560 {
561     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
562     Node* node = info.node();
563     ASSERT(info.registerFormat() == DataFormatDouble);
564
565     SilentSpillAction spillAction;
566     SilentFillAction fillAction;
567         
568     if (!info.needsSpill())
569         spillAction = DoNothingForSpill;
570     else {
571         ASSERT(!node->hasConstant());
572         ASSERT(info.spillFormat() == DataFormatNone);
573         ASSERT(info.fpr() == source);
574         spillAction = StoreDouble;
575     }
576         
577 #if USE(JSVALUE64)
578     if (node->hasConstant()) {
579         node->asNumber(); // To get the assertion.
580         fillAction = SetDoubleConstant;
581     } else {
582         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
583         fillAction = LoadDouble;
584     }
585 #elif USE(JSVALUE32_64)
586     ASSERT(info.registerFormat() == DataFormatDouble);
587     if (node->hasConstant()) {
588         node->asNumber(); // To get the assertion.
589         fillAction = SetDoubleConstant;
590     } else
591         fillAction = LoadDouble;
592 #endif
593
594     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
595 }
596     
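// Executes the store half of a silent save plan; silentFill() below performs the
// matching reload, using canTrample as scratch where a 64-bit immediate has to be
// materialized before landing in an FPR.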
597 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
598 {
599     switch (plan.spillAction()) {
600     case DoNothingForSpill:
601         break;
602     case Store32Tag:
603         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
604         break;
605     case Store32Payload:
606         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
607         break;
608     case StorePtr:
609         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
610         break;
611 #if USE(JSVALUE64)
612     case Store64:
613         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
614         break;
615 #endif
616     case StoreDouble:
617         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
618         break;
619     default:
620         RELEASE_ASSERT_NOT_REACHED();
621     }
622 }
623     
624 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
625 {
626 #if USE(JSVALUE32_64)
627     UNUSED_PARAM(canTrample);
628 #endif
629     switch (plan.fillAction()) {
630     case DoNothingForFill:
631         break;
632     case SetInt32Constant:
633         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
634         break;
635 #if USE(JSVALUE64)
636     case SetInt52Constant:
637         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
638         break;
639     case SetStrictInt52Constant:
640         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
641         break;
642 #endif // USE(JSVALUE64)
643     case SetBooleanConstant:
644         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
645         break;
646     case SetCellConstant:
647         m_jit.move(TrustedImmPtr(plan.node()->asCell()), plan.gpr());
648         break;
649 #if USE(JSVALUE64)
650     case SetTrustedJSConstant:
651         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
652         break;
653     case SetJSConstant:
654         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
655         break;
656     case SetDoubleConstant:
657         m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
658         m_jit.move64ToDouble(canTrample, plan.fpr());
659         break;
660     case Load32PayloadBoxInt:
661         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
662         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
663         break;
664     case Load32PayloadConvertToInt52:
665         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
666         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
667         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
668         break;
669     case Load32PayloadSignExtend:
670         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
671         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
672         break;
673 #else
674     case SetJSConstantTag:
675         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
676         break;
677     case SetJSConstantPayload:
678         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
679         break;
680     case SetInt32Tag:
681         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
682         break;
683     case SetCellTag:
684         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
685         break;
686     case SetBooleanTag:
687         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
688         break;
689     case SetDoubleConstant:
690         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
691         break;
692 #endif
693     case Load32Tag:
694         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
695         break;
696     case Load32Payload:
697         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
698         break;
699     case LoadPtr:
700         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
701         break;
702 #if USE(JSVALUE64)
703     case Load64:
704         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
705         break;
706     case Load64ShiftInt52Right:
707         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
708         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
709         break;
710     case Load64ShiftInt52Left:
711         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
712         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
713         break;
714 #endif
715     case LoadDouble:
716         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
717         break;
718     default:
719         RELEASE_ASSERT_NOT_REACHED();
720     }
721 }
722     
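// Given the indexing type byte already loaded into tempGPR, emits a branch that is
// taken when the cell's indexing shape (and array-ness, where the ArrayMode cares
// about it) does not match what the checked ArrayMode expects.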
723 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
724 {
725     switch (arrayMode.arrayClass()) {
726     case Array::OriginalArray: {
727         CRASH();
728 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
729         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
730         return result;
731 #endif
732     }
733         
734     case Array::Array:
735         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
736         return m_jit.branch32(
737             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
738         
739     case Array::NonArray:
740     case Array::OriginalNonArray:
741         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
742         return m_jit.branch32(
743             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
744         
745     case Array::PossiblyArray:
746         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
747         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
748     }
749     
750     RELEASE_ASSERT_NOT_REACHED();
751     return JITCompiler::Jump();
752 }
753
754 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
755 {
756     JITCompiler::JumpList result;
757     
758     switch (arrayMode.type()) {
759     case Array::Int32:
760         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
761
762     case Array::Double:
763         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
764
765     case Array::Contiguous:
766         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
767
768     case Array::Undecided:
769         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
770
771     case Array::ArrayStorage:
772     case Array::SlowPutArrayStorage: {
773         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
774         
775         if (arrayMode.isJSArray()) {
776             if (arrayMode.isSlowPut()) {
777                 result.append(
778                     m_jit.branchTest32(
779                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
780                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
781                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
782                 result.append(
783                     m_jit.branch32(
784                         MacroAssembler::Above, tempGPR,
785                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
786                 break;
787             }
788             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
789             result.append(
790                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
791             break;
792         }
793         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
794         if (arrayMode.isSlowPut()) {
795             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
796             result.append(
797                 m_jit.branch32(
798                     MacroAssembler::Above, tempGPR,
799                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
800             break;
801         }
802         result.append(
803             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
804         break;
805     }
806     default:
807         CRASH();
808         break;
809     }
810     
811     return result;
812 }
813
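// Emits the speculation checks for a CheckArray node: indexed shapes are validated
// via the indexing type byte, DirectArguments/ScopedArguments and typed arrays via
// the cell's JSType. The node itself produces no result.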
814 void SpeculativeJIT::checkArray(Node* node)
815 {
816     ASSERT(node->arrayMode().isSpecific());
817     ASSERT(!node->arrayMode().doesConversion());
818     
819     SpeculateCellOperand base(this, node->child1());
820     GPRReg baseReg = base.gpr();
821     
822     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
823         noResult(m_currentNode);
824         return;
825     }
826     
827     const ClassInfo* expectedClassInfo = 0;
828     
829     switch (node->arrayMode().type()) {
830     case Array::AnyTypedArray:
831     case Array::String:
832         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
833         break;
834     case Array::Int32:
835     case Array::Double:
836     case Array::Contiguous:
837     case Array::Undecided:
838     case Array::ArrayStorage:
839     case Array::SlowPutArrayStorage: {
840         GPRTemporary temp(this);
841         GPRReg tempGPR = temp.gpr();
842         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
843         speculationCheck(
844             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
845             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
846         
847         noResult(m_currentNode);
848         return;
849     }
850     case Array::DirectArguments:
851         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
852         noResult(m_currentNode);
853         return;
854     case Array::ScopedArguments:
855         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
856         noResult(m_currentNode);
857         return;
858     default:
859         speculateCellTypeWithoutTypeFiltering(
860             node->child1(), baseReg,
861             typeForTypedArrayType(node->arrayMode().typedArrayType()));
862         noResult(m_currentNode);
863         return;
864     }
865     
866     RELEASE_ASSERT(expectedClassInfo);
867     
868     GPRTemporary temp(this);
869     GPRTemporary temp2(this);
870     m_jit.emitLoadStructure(baseReg, temp.gpr(), temp2.gpr());
871     speculationCheck(
872         BadType, JSValueSource::unboxedCell(baseReg), node,
873         m_jit.branchPtr(
874             MacroAssembler::NotEqual,
875             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
876             MacroAssembler::TrustedImmPtr(expectedClassInfo)));
877     
878     noResult(m_currentNode);
879 }
880
881 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
882 {
883     ASSERT(node->arrayMode().doesConversion());
884     
885     GPRTemporary temp(this);
886     GPRTemporary structure;
887     GPRReg tempGPR = temp.gpr();
888     GPRReg structureGPR = InvalidGPRReg;
889     
890     if (node->op() != ArrayifyToStructure) {
891         GPRTemporary realStructure(this);
892         structure.adopt(realStructure);
893         structureGPR = structure.gpr();
894     }
895         
896     // We can skip all that comes next if we already have array storage.
897     MacroAssembler::JumpList slowPath;
898     
899     if (node->op() == ArrayifyToStructure) {
900         slowPath.append(m_jit.branchWeakStructure(
901             JITCompiler::NotEqual,
902             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
903             node->structure()));
904     } else {
905         m_jit.load8(
906             MacroAssembler::Address(baseReg, JSCell::indexingTypeOffset()), tempGPR);
907         
908         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
909     }
910     
911     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
912         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
913     
914     noResult(m_currentNode);
915 }
916
917 void SpeculativeJIT::arrayify(Node* node)
918 {
919     ASSERT(node->arrayMode().isSpecific());
920     
921     SpeculateCellOperand base(this, node->child1());
922     
923     if (!node->child2()) {
924         arrayify(node, base.gpr(), InvalidGPRReg);
925         return;
926     }
927     
928     SpeculateInt32Operand property(this, node->child2());
929     
930     arrayify(node, base.gpr(), property.gpr());
931 }
932
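// Materializes a storage (butterfly) pointer for the edge into a GPR, reloading it
// from the stack if it was spilled in DataFormatStorage form and otherwise filling
// the value as a cell.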
933 GPRReg SpeculativeJIT::fillStorage(Edge edge)
934 {
935     VirtualRegister virtualRegister = edge->virtualRegister();
936     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
937     
938     switch (info.registerFormat()) {
939     case DataFormatNone: {
940         if (info.spillFormat() == DataFormatStorage) {
941             GPRReg gpr = allocate();
942             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
943             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
944             info.fillStorage(*m_stream, gpr);
945             return gpr;
946         }
947         
948         // Must be a cell; fill it as a cell and then return the pointer.
949         return fillSpeculateCell(edge);
950     }
951         
952     case DataFormatStorage: {
953         GPRReg gpr = info.gpr();
954         m_gprs.lock(gpr);
955         return gpr;
956     }
957         
958     default:
959         return fillSpeculateCell(edge);
960     }
961 }
962
963 void SpeculativeJIT::useChildren(Node* node)
964 {
965     if (node->flags() & NodeHasVarArgs) {
966         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
967             if (!!m_jit.graph().m_varArgChildren[childIdx])
968                 use(m_jit.graph().m_varArgChildren[childIdx]);
969         }
970     } else {
971         Edge child1 = node->child1();
972         if (!child1) {
973             ASSERT(!node->child2() && !node->child3());
974             return;
975         }
976         use(child1);
977         
978         Edge child2 = node->child2();
979         if (!child2) {
980             ASSERT(!node->child3());
981             return;
982         }
983         use(child2);
984         
985         Edge child3 = node->child3();
986         if (!child3)
987             return;
988         use(child3);
989     }
990 }
991
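// TryGetById uses the AccessType::GetPure flavour of the get-by-id inline cache.
// With a CellUse child the not-cell check can be skipped entirely; with UntypedUse
// we branch to the slow path for non-cell bases before consulting the cache.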
992 void SpeculativeJIT::compileTryGetById(Node* node)
993 {
994     switch (node->child1().useKind()) {
995     case CellUse: {
996         SpeculateCellOperand base(this, node->child1());
997         JSValueRegsTemporary result(this, Reuse, base);
998
999         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
1000         JSValueRegs resultRegs = result.regs();
1001
1002         base.use();
1003
1004         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::GetPure);
1005
1006         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1007         break;
1008     }
1009
1010     case UntypedUse: {
1011         JSValueOperand base(this, node->child1());
1012         JSValueRegsTemporary result(this, Reuse, base);
1013
1014         JSValueRegs baseRegs = base.jsValueRegs();
1015         JSValueRegs resultRegs = result.regs();
1016
1017         base.use();
1018
1019         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1020
1021         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, DontSpill, AccessType::GetPure);
1022
1023         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1024         break;
1025     }
1026
1027     default:
1028         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1029         break;
1030     } 
1031 }
1032
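// Compiles the In node. When the property name is a constant atomic string we emit
// a patchable inline cache backed by operationInOptimize; otherwise we fall back to
// a generic call to operationGenericIn.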
1033 void SpeculativeJIT::compileIn(Node* node)
1034 {
1035     SpeculateCellOperand base(this, node->child2());
1036     GPRReg baseGPR = base.gpr();
1037     
1038     if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
1039         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1040             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1041             
1042             GPRTemporary result(this);
1043             GPRReg resultGPR = result.gpr();
1044
1045             use(node->child1());
1046             
1047             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1048             MacroAssembler::Label done = m_jit.label();
1049             
1050             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1051             // we can cast it to const AtomicStringImpl* safely.
1052             auto slowPath = slowPathCall(
1053                 jump.m_jump, this, operationInOptimize,
1054                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1055                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1056             
1057             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1058             stubInfo->codeOrigin = node->origin.semantic;
1059             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1060             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1061 #if USE(JSVALUE32_64)
1062             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1063             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1064 #endif
1065             stubInfo->patch.usedRegisters = usedRegisters();
1066
1067             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1068             addSlowPathGenerator(WTFMove(slowPath));
1069
1070             base.use();
1071
1072             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1073             return;
1074         }
1075     }
1076
1077     JSValueOperand key(this, node->child1());
1078     JSValueRegs regs = key.jsValueRegs();
1079         
1080     GPRFlushedCallResult result(this);
1081     GPRReg resultGPR = result.gpr();
1082         
1083     base.use();
1084     key.use();
1085         
1086     flushRegisters();
1087     callOperation(
1088         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1089         baseGPR, regs);
1090     m_jit.exceptionCheck();
1091     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1092 }
1093
1094 void SpeculativeJIT::compileDeleteById(Node* node)
1095 {
1096     JSValueOperand value(this, node->child1());
1097     GPRFlushedCallResult result(this);
1098
1099     JSValueRegs valueRegs = value.jsValueRegs();
1100     GPRReg resultGPR = result.gpr();
1101
1102     value.use();
1103
1104     flushRegisters();
1105     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1106     m_jit.exceptionCheck();
1107
1108     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1109 }
1110
1111 void SpeculativeJIT::compileDeleteByVal(Node* node)
1112 {
1113     JSValueOperand base(this, node->child1());
1114     JSValueOperand key(this, node->child2());
1115     GPRFlushedCallResult result(this);
1116
1117     JSValueRegs baseRegs = base.jsValueRegs();
1118     JSValueRegs keyRegs = key.jsValueRegs();
1119     GPRReg resultGPR = result.gpr();
1120
1121     base.use();
1122     key.use();
1123
1124     flushRegisters();
1125     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1126     m_jit.exceptionCheck();
1127
1128     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1129 }
1130
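// Tries to fuse the compare with an immediately following Branch (a peephole
// branch); returns true if fusion happened, so compilation resumes after the fused
// branch node.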
1131 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1132 {
1133     unsigned branchIndexInBlock = detectPeepHoleBranch();
1134     if (branchIndexInBlock != UINT_MAX) {
1135         Node* branchNode = m_block->at(branchIndexInBlock);
1136
1137         ASSERT(node->adjustedRefCount() == 1);
1138         
1139         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1140     
1141         m_indexInBlock = branchIndexInBlock;
1142         m_currentNode = branchNode;
1143         
1144         return true;
1145     }
1146     
1147     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1148     
1149     return false;
1150 }
1151
1152 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1153 {
1154     unsigned branchIndexInBlock = detectPeepHoleBranch();
1155     if (branchIndexInBlock != UINT_MAX) {
1156         Node* branchNode = m_block->at(branchIndexInBlock);
1157
1158         ASSERT(node->adjustedRefCount() == 1);
1159         
1160         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1161     
1162         m_indexInBlock = branchIndexInBlock;
1163         m_currentNode = branchNode;
1164         
1165         return true;
1166     }
1167     
1168     nonSpeculativeNonPeepholeStrictEq(node, invert);
1169     
1170     return false;
1171 }
1172
1173 static const char* dataFormatString(DataFormat format)
1174 {
1175     // These values correspond to the DataFormat enum.
1176     const char* strings[] = {
1177         "[  ]",
1178         "[ i]",
1179         "[ d]",
1180         "[ c]",
1181         "Err!",
1182         "Err!",
1183         "Err!",
1184         "Err!",
1185         "[J ]",
1186         "[Ji]",
1187         "[Jd]",
1188         "[Jc]",
1189         "Err!",
1190         "Err!",
1191         "Err!",
1192         "Err!",
1193     };
1194     return strings[format];
1195 }
1196
1197 void SpeculativeJIT::dump(const char* label)
1198 {
1199     if (label)
1200         dataLogF("<%s>\n", label);
1201
1202     dataLogF("  gprs:\n");
1203     m_gprs.dump();
1204     dataLogF("  fprs:\n");
1205     m_fprs.dump();
1206     dataLogF("  VirtualRegisters:\n");
1207     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1208         GenerationInfo& info = m_generationInfo[i];
1209         if (info.alive())
1210             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1211         else
1212             dataLogF("    % 3d:[__][__]", i);
1213         if (info.registerFormat() == DataFormatDouble)
1214             dataLogF(":fpr%d\n", info.fpr());
1215         else if (info.registerFormat() != DataFormatNone
1216 #if USE(JSVALUE32_64)
1217             && !(info.registerFormat() & DataFormatJS)
1218 #endif
1219             ) {
1220             ASSERT(info.gpr() != InvalidGPRReg);
1221             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1222         } else
1223             dataLogF("\n");
1224     }
1225     if (label)
1226         dataLogF("</%s>\n", label);
1227 }
1228
1229 GPRTemporary::GPRTemporary()
1230     : m_jit(0)
1231     , m_gpr(InvalidGPRReg)
1232 {
1233 }
1234
1235 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1236     : m_jit(jit)
1237     , m_gpr(InvalidGPRReg)
1238 {
1239     m_gpr = m_jit->allocate();
1240 }
1241
1242 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1243     : m_jit(jit)
1244     , m_gpr(InvalidGPRReg)
1245 {
1246     m_gpr = m_jit->allocate(specific);
1247 }
1248
1249 #if USE(JSVALUE32_64)
1250 GPRTemporary::GPRTemporary(
1251     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1252     : m_jit(jit)
1253     , m_gpr(InvalidGPRReg)
1254 {
1255     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1256         m_gpr = m_jit->reuse(op1.gpr(which));
1257     else
1258         m_gpr = m_jit->allocate();
1259 }
1260 #endif // USE(JSVALUE32_64)
1261
1262 JSValueRegsTemporary::JSValueRegsTemporary() { }
1263
1264 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1265 #if USE(JSVALUE64)
1266     : m_gpr(jit)
1267 #else
1268     : m_payloadGPR(jit)
1269     , m_tagGPR(jit)
1270 #endif
1271 {
1272 }
1273
1274 #if USE(JSVALUE64)
1275 template<typename T>
1276 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1277     : m_gpr(jit, Reuse, operand)
1278 {
1279 }
1280 #else
1281 template<typename T>
1282 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1283 {
1284     if (resultWord == PayloadWord) {
1285         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1286         m_tagGPR = GPRTemporary(jit);
1287     } else {
1288         m_payloadGPR = GPRTemporary(jit);
1289         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1290     }
1291 }
1292 #endif
1293
1294 #if USE(JSVALUE64)
1295 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1296 {
1297     m_gpr = GPRTemporary(jit, Reuse, operand);
1298 }
1299 #else
1300 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1301 {
1302     if (jit->canReuse(operand.node())) {
1303         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1304         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1305     } else {
1306         m_payloadGPR = GPRTemporary(jit);
1307         m_tagGPR = GPRTemporary(jit);
1308     }
1309 }
1310 #endif
1311
1312 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1313
1314 JSValueRegs JSValueRegsTemporary::regs()
1315 {
1316 #if USE(JSVALUE64)
1317     return JSValueRegs(m_gpr.gpr());
1318 #else
1319     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1320 #endif
1321 }
1322
1323 void GPRTemporary::adopt(GPRTemporary& other)
1324 {
1325     ASSERT(!m_jit);
1326     ASSERT(m_gpr == InvalidGPRReg);
1327     ASSERT(other.m_jit);
1328     ASSERT(other.m_gpr != InvalidGPRReg);
1329     m_jit = other.m_jit;
1330     m_gpr = other.m_gpr;
1331     other.m_jit = 0;
1332     other.m_gpr = InvalidGPRReg;
1333 }
1334
1335 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1336 {
1337     ASSERT(other.m_jit);
1338     ASSERT(other.m_fpr != InvalidFPRReg);
1339     m_jit = other.m_jit;
1340     m_fpr = other.m_fpr;
1341
1342     other.m_jit = nullptr;
1343 }
1344
1345 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1346     : m_jit(jit)
1347     , m_fpr(InvalidFPRReg)
1348 {
1349     m_fpr = m_jit->fprAllocate();
1350 }
1351
1352 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1353     : m_jit(jit)
1354     , m_fpr(InvalidFPRReg)
1355 {
1356     if (m_jit->canReuse(op1.node()))
1357         m_fpr = m_jit->reuse(op1.fpr());
1358     else
1359         m_fpr = m_jit->fprAllocate();
1360 }
1361
1362 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1363     : m_jit(jit)
1364     , m_fpr(InvalidFPRReg)
1365 {
1366     if (m_jit->canReuse(op1.node()))
1367         m_fpr = m_jit->reuse(op1.fpr());
1368     else if (m_jit->canReuse(op2.node()))
1369         m_fpr = m_jit->reuse(op2.fpr());
1370     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1371         m_fpr = m_jit->reuse(op1.fpr());
1372     else
1373         m_fpr = m_jit->fprAllocate();
1374 }
1375
1376 #if USE(JSVALUE32_64)
1377 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1378     : m_jit(jit)
1379     , m_fpr(InvalidFPRReg)
1380 {
1381     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1382         m_fpr = m_jit->reuse(op1.fpr());
1383     else
1384         m_fpr = m_jit->fprAllocate();
1385 }
1386 #endif
1387
1388 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1389 {
1390     BasicBlock* taken = branchNode->branchData()->taken.block;
1391     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1392
1393     if (taken == nextBlock()) {
1394         condition = MacroAssembler::invert(condition);
1395         std::swap(taken, notTaken);
1396     }
1397
1398     SpeculateDoubleOperand op1(this, node->child1());
1399     SpeculateDoubleOperand op2(this, node->child2());
1400     
1401     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1402     jump(notTaken);
1403 }
1404
1405 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1406 {
1407     BasicBlock* taken = branchNode->branchData()->taken.block;
1408     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1409
1410     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1411     
1412     if (taken == nextBlock()) {
1413         condition = MacroAssembler::NotEqual;
1414         BasicBlock* tmp = taken;
1415         taken = notTaken;
1416         notTaken = tmp;
1417     }
1418
1419     SpeculateCellOperand op1(this, node->child1());
1420     SpeculateCellOperand op2(this, node->child2());
1421     
1422     GPRReg op1GPR = op1.gpr();
1423     GPRReg op2GPR = op2.gpr();
1424     
1425     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1426         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1427             speculationCheck(
1428                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1429         }
1430         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1431             speculationCheck(
1432                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1433         }
1434     } else {
1435         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1436             speculationCheck(
1437                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1438                 m_jit.branchIfNotObject(op1GPR));
1439         }
1440         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1441             m_jit.branchTest8(
1442                 MacroAssembler::NonZero, 
1443                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1444                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1445
1446         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1447             speculationCheck(
1448                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1449                 m_jit.branchIfNotObject(op2GPR));
1450         }
1451         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1452             m_jit.branchTest8(
1453                 MacroAssembler::NonZero, 
1454                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1455                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1456     }
1457
1458     branchPtr(condition, op1GPR, op2GPR, taken);
1459     jump(notTaken);
1460 }
1461
1462 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1463 {
1464     BasicBlock* taken = branchNode->branchData()->taken.block;
1465     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1466
1467     // The branch instruction will branch to the taken block.
1468     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1469     if (taken == nextBlock()) {
1470         condition = JITCompiler::invert(condition);
1471         BasicBlock* tmp = taken;
1472         taken = notTaken;
1473         notTaken = tmp;
1474     }
1475
1476     if (node->child1()->isInt32Constant()) {
1477         int32_t imm = node->child1()->asInt32();
1478         SpeculateBooleanOperand op2(this, node->child2());
1479         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1480     } else if (node->child2()->isInt32Constant()) {
1481         SpeculateBooleanOperand op1(this, node->child1());
1482         int32_t imm = node->child2()->asInt32();
1483         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1484     } else {
1485         SpeculateBooleanOperand op1(this, node->child1());
1486         SpeculateBooleanOperand op2(this, node->child2());
1487         branch32(condition, op1.gpr(), op2.gpr(), taken);
1488     }
1489
1490     jump(notTaken);
1491 }
1492
1493 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1494 {
1495     BasicBlock* taken = branchNode->branchData()->taken.block;
1496     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1497
1498     // The branch instruction will branch to the taken block.
1499     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1500     if (taken == nextBlock()) {
1501         condition = JITCompiler::invert(condition);
1502         BasicBlock* tmp = taken;
1503         taken = notTaken;
1504         notTaken = tmp;
1505     }
1506
1507     if (node->child1()->isInt32Constant()) {
1508         int32_t imm = node->child1()->asInt32();
1509         SpeculateInt32Operand op2(this, node->child2());
1510         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1511     } else if (node->child2()->isInt32Constant()) {
1512         SpeculateInt32Operand op1(this, node->child1());
1513         int32_t imm = node->child2()->asInt32();
1514         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1515     } else {
1516         SpeculateInt32Operand op1(this, node->child1());
1517         SpeculateInt32Operand op2(this, node->child2());
1518         branch32(condition, op1.gpr(), op2.gpr(), taken);
1519     }
1520
1521     jump(notTaken);
1522 }
1523
1524 // Returns true if the compare is fused with a subsequent branch.
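// A fused compare avoids materializing a boolean just to branch on it. For a hypothetical
// DFG snippet of the form
//     @1: CompareLess(Int32:@a, Int32:@b)
//     @2: Branch(@1, taken: #1, notTaken: #2)
// the compare and the branch are emitted together as a single conditional jump by one of
// the compilePeepHole*Branch helpers above, and the compare's children are consumed here.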
1525 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1526 {
1527     // Fused compare & branch.
1528     unsigned branchIndexInBlock = detectPeepHoleBranch();
1529     if (branchIndexInBlock != UINT_MAX) {
1530         Node* branchNode = m_block->at(branchIndexInBlock);
1531
1532         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1533         // so there can be no intervening nodes that also reference the compare.
1534         ASSERT(node->adjustedRefCount() == 1);
1535
1536         if (node->isBinaryUseKind(Int32Use))
1537             compilePeepHoleInt32Branch(node, branchNode, condition);
1538 #if USE(JSVALUE64)
1539         else if (node->isBinaryUseKind(Int52RepUse))
1540             compilePeepHoleInt52Branch(node, branchNode, condition);
1541 #endif // USE(JSVALUE64)
1542         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1543             // Use non-peephole comparison, for now.
1544             return false;
1545         } else if (node->isBinaryUseKind(DoubleRepUse))
1546             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1547         else if (node->op() == CompareEq) {
1548             if (node->isBinaryUseKind(BooleanUse))
1549                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1550             else if (node->isBinaryUseKind(SymbolUse))
1551                 compilePeepHoleSymbolEquality(node, branchNode);
1552             else if (node->isBinaryUseKind(ObjectUse))
1553                 compilePeepHoleObjectEquality(node, branchNode);
1554             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1555                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1556             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1557                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1558             else if (!needsTypeCheck(node->child1(), SpecOther))
1559                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1560             else if (!needsTypeCheck(node->child2(), SpecOther))
1561                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1562             else {
1563                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1564                 return true;
1565             }
1566         } else {
1567             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1568             return true;
1569         }
1570
1571         use(node->child1());
1572         use(node->child2());
1573         m_indexInBlock = branchIndexInBlock;
1574         m_currentNode = branchNode;
1575         return true;
1576     }
1577     return false;
1578 }
1579
1580 void SpeculativeJIT::noticeOSRBirth(Node* node)
1581 {
1582     if (!node->hasVirtualRegister())
1583         return;
1584     
1585     VirtualRegister virtualRegister = node->virtualRegister();
1586     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1587     
1588     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1589 }
1590
1591 void SpeculativeJIT::compileMovHint(Node* node)
1592 {
1593     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1594     
1595     Node* child = node->child1().node();
1596     noticeOSRBirth(child);
1597     
1598     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1599 }
1600
1601 void SpeculativeJIT::bail(AbortReason reason)
1602 {
1603     if (verboseCompilationEnabled())
1604         dataLog("Bailing compilation.\n");
1605     m_compileOkay = true;
1606     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1607     clearGenerationInfo();
1608 }
1609
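// Per-block code generation: record the block's head label, plant a breakpoint for blocks
// the CFA proved unreachable, replay the variables live at the block head into the variable
// event stream, then walk the block's nodes in order, running the abstract interpreter in
// lockstep with code generation and bailing if it discovers a contradiction.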
1610 void SpeculativeJIT::compileCurrentBlock()
1611 {
1612     ASSERT(m_compileOkay);
1613     
1614     if (!m_block)
1615         return;
1616     
1617     ASSERT(m_block->isReachable);
1618     
1619     m_jit.blockHeads()[m_block->index] = m_jit.label();
1620
1621     if (!m_block->intersectionOfCFAHasVisited) {
1622         // Don't generate code for basic blocks that are unreachable according to CFA.
1623         // But to be sure that nobody has generated a jump to this block, drop in a
1624         // breakpoint here.
1625         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1626         return;
1627     }
1628
1629     m_stream->appendAndLog(VariableEvent::reset());
1630     
1631     m_jit.jitAssertHasValidCallFrame();
1632     m_jit.jitAssertTagsInPlace();
1633     m_jit.jitAssertArgumentCountSane();
1634
1635     m_state.reset();
1636     m_state.beginBasicBlock(m_block);
1637     
1638     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1639         int operand = m_block->variablesAtHead.operandForIndex(i);
1640         Node* node = m_block->variablesAtHead[i];
1641         if (!node)
1642             continue; // No need to record dead SetLocal's.
1643         
1644         VariableAccessData* variable = node->variableAccessData();
1645         DataFormat format;
1646         if (!node->refCount())
1647             continue; // No need to record dead SetLocal's.
1648         format = dataFormatFor(variable->flushFormat());
1649         m_stream->appendAndLog(
1650             VariableEvent::setLocal(
1651                 VirtualRegister(operand),
1652                 variable->machineLocal(),
1653                 format));
1654     }
1655
1656     m_origin = NodeOrigin();
1657     
1658     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1659         m_currentNode = m_block->at(m_indexInBlock);
1660         
1661         // We may have hit a contradiction that the CFA was aware of but that the JIT
1662         // didn't cause directly.
1663         if (!m_state.isValid()) {
1664             bail(DFGBailedAtTopOfBlock);
1665             return;
1666         }
1667
1668         m_interpreter.startExecuting();
1669         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1670         m_jit.setForNode(m_currentNode);
1671         m_origin = m_currentNode->origin;
1672         if (validationEnabled())
1673             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1674         m_lastGeneratedNode = m_currentNode->op();
1675         
1676         ASSERT(m_currentNode->shouldGenerate());
1677         
1678         if (verboseCompilationEnabled()) {
1679             dataLogF(
1680                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1681                 (int)m_currentNode->index(),
1682                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1683             dataLog("\n");
1684         }
1685
1686         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1687             m_jit.jitReleaseAssertNoException();
1688
1689         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.label(), m_origin.semantic);
1690
1691         compile(m_currentNode);
1692         
1693         if (belongsInMinifiedGraph(m_currentNode->op()))
1694             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1695         
1696 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1697         m_jit.clearRegisterAllocationOffsets();
1698 #endif
1699         
1700         if (!m_compileOkay) {
1701             bail(DFGBailedAtEndOfNode);
1702             return;
1703         }
1704         
1705         // Make sure that the abstract state is rematerialized for the next node.
1706         m_interpreter.executeEffects(m_indexInBlock);
1707     }
1708     
1709     // Perform the most basic verification that children have been used correctly.
1710     if (!ASSERT_DISABLED) {
1711         for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
1712             GenerationInfo& info = m_generationInfo[index];
1713             RELEASE_ASSERT(!info.alive());
1714         }
1715     }
1716 }
1717
1718 // If we are making type predictions about our arguments then
1719 // we need to check that they are correct on function entry.
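// For example, on 64-bit a FlushedInt32 argument is validated with a single unsigned
// compare: boxed int32s are the only JSValues that are numerically at or above
// GPRInfo::tagTypeNumberRegister, so the branch64(Below, ...) check in the FlushedInt32
// case below exits for any value that is not an int32.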
1720 void SpeculativeJIT::checkArgumentTypes()
1721 {
1722     ASSERT(!m_currentNode);
1723     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1724
1725     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1726         Node* node = m_jit.graph().m_arguments[i];
1727         if (!node) {
1728             // The argument is dead. We don't do any checks for such arguments.
1729             continue;
1730         }
1731         
1732         ASSERT(node->op() == SetArgument);
1733         ASSERT(node->shouldGenerate());
1734
1735         VariableAccessData* variableAccessData = node->variableAccessData();
1736         FlushFormat format = variableAccessData->flushFormat();
1737         
1738         if (format == FlushedJSValue)
1739             continue;
1740         
1741         VirtualRegister virtualRegister = variableAccessData->local();
1742
1743         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1744         
1745 #if USE(JSVALUE64)
1746         switch (format) {
1747         case FlushedInt32: {
1748             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1749             break;
1750         }
1751         case FlushedBoolean: {
1752             GPRTemporary temp(this);
1753             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1754             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1755             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1756             break;
1757         }
1758         case FlushedCell: {
1759             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1760             break;
1761         }
1762         default:
1763             RELEASE_ASSERT_NOT_REACHED();
1764             break;
1765         }
1766 #else
1767         switch (format) {
1768         case FlushedInt32: {
1769             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1770             break;
1771         }
1772         case FlushedBoolean: {
1773             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1774             break;
1775         }
1776         case FlushedCell: {
1777             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1778             break;
1779         }
1780         default:
1781             RELEASE_ASSERT_NOT_REACHED();
1782             break;
1783         }
1784 #endif
1785     }
1786
1787     m_origin = NodeOrigin();
1788 }
1789
1790 bool SpeculativeJIT::compile()
1791 {
1792     checkArgumentTypes();
1793     
1794     ASSERT(!m_currentNode);
1795     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1796         m_jit.setForBlockIndex(blockIndex);
1797         m_block = m_jit.graph().block(blockIndex);
1798         compileCurrentBlock();
1799     }
1800     linkBranches();
1801     return true;
1802 }
1803
1804 void SpeculativeJIT::createOSREntries()
1805 {
1806     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1807         BasicBlock* block = m_jit.graph().block(blockIndex);
1808         if (!block)
1809             continue;
1810         if (!block->isOSRTarget)
1811             continue;
1812         
1813         // Currently we don't have OSR entry trampolines. We could add them
1814         // here if need be.
1815         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1816     }
1817 }
1818
1819 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1820 {
1821     unsigned osrEntryIndex = 0;
1822     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1823         BasicBlock* block = m_jit.graph().block(blockIndex);
1824         if (!block)
1825             continue;
1826         if (!block->isOSRTarget)
1827             continue;
1828         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1829     }
1830     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1831     
1832     if (verboseCompilationEnabled()) {
1833         DumpContext dumpContext;
1834         dataLog("OSR Entries:\n");
1835         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1836             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1837         if (!dumpContext.isEmpty())
1838             dumpContext.dump(WTF::dataFile());
1839     }
1840 }
1841
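// Double PutByVal: the value is type-checked to be a full real number (a NaN fails the
// check). In-bounds array modes treat an out-of-bounds index as a speculation failure;
// otherwise a store just past the public length grows it up to the vector length, and
// indices beyond the vector length take the operationPutDoubleByValBeyondArrayBounds
// slow path when out-of-bounds stores are allowed.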
1842 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1843 {
1844     Edge child3 = m_jit.graph().varArgChild(node, 2);
1845     Edge child4 = m_jit.graph().varArgChild(node, 3);
1846
1847     ArrayMode arrayMode = node->arrayMode();
1848     
1849     GPRReg baseReg = base.gpr();
1850     GPRReg propertyReg = property.gpr();
1851     
1852     SpeculateDoubleOperand value(this, child3);
1853
1854     FPRReg valueReg = value.fpr();
1855     
1856     DFG_TYPE_CHECK(
1857         JSValueRegs(), child3, SpecFullRealNumber,
1858         m_jit.branchDouble(
1859             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1860     
1861     if (!m_compileOkay)
1862         return;
1863     
1864     StorageOperand storage(this, child4);
1865     GPRReg storageReg = storage.gpr();
1866
1867     if (node->op() == PutByValAlias) {
1868         // Store the value to the array.
1869         GPRReg propertyReg = property.gpr();
1870         FPRReg valueReg = value.fpr();
1871         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1872         
1873         noResult(m_currentNode);
1874         return;
1875     }
1876     
1877     GPRTemporary temporary;
1878     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1879
1880     MacroAssembler::Jump slowCase;
1881     
1882     if (arrayMode.isInBounds()) {
1883         speculationCheck(
1884             OutOfBounds, JSValueRegs(), 0,
1885             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1886     } else {
1887         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1888         
1889         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1890         
1891         if (!arrayMode.isOutOfBounds())
1892             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1893         
1894         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1895         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1896         
1897         inBounds.link(&m_jit);
1898     }
1899     
1900     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1901
1902     base.use();
1903     property.use();
1904     value.use();
1905     storage.use();
1906     
1907     if (arrayMode.isOutOfBounds()) {
1908         addSlowPathGenerator(
1909             slowPathCall(
1910                 slowCase, this,
1911                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1912                 NoResult, baseReg, propertyReg, valueReg));
1913     }
1914
1915     noResult(m_currentNode, UseChildrenCalledExplicitly);
1916 }
1917
1918 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1919 {
1920     SpeculateCellOperand string(this, node->child1());
1921     SpeculateStrictInt32Operand index(this, node->child2());
1922     StorageOperand storage(this, node->child3());
1923
1924     GPRReg stringReg = string.gpr();
1925     GPRReg indexReg = index.gpr();
1926     GPRReg storageReg = storage.gpr();
1927     
1928     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
1929
1930     // unsigned comparison so we can filter out negative indices and indices that are too large
1931     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
1932
1933     GPRTemporary scratch(this);
1934     GPRReg scratchReg = scratch.gpr();
1935
1936     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
1937
1938     // Load the character into scratchReg
1939     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1940
1941     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
1942     JITCompiler::Jump cont8Bit = m_jit.jump();
1943
1944     is16Bit.link(&m_jit);
1945
1946     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
1947
1948     cont8Bit.link(&m_jit);
1949
1950     int32Result(scratchReg, m_currentNode);
1951 }
1952
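// String GetByVal: in-bounds characters are loaded from the 8-bit or 16-bit backing store
// and mapped through the VM's single-character string cache; characters >= 0x100 go to the
// operationSingleCharacterString slow path. For out-of-bounds array modes, a sane String
// prototype chain lets the slow path return a trivial value (see the FIXME below);
// otherwise operationGetByValStringInt is called.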
1953 void SpeculativeJIT::compileGetByValOnString(Node* node)
1954 {
1955     SpeculateCellOperand base(this, node->child1());
1956     SpeculateStrictInt32Operand property(this, node->child2());
1957     StorageOperand storage(this, node->child3());
1958     GPRReg baseReg = base.gpr();
1959     GPRReg propertyReg = property.gpr();
1960     GPRReg storageReg = storage.gpr();
1961
1962     GPRTemporary scratch(this);
1963     GPRReg scratchReg = scratch.gpr();
1964 #if USE(JSVALUE32_64)
1965     GPRTemporary resultTag;
1966     GPRReg resultTagReg = InvalidGPRReg;
1967     if (node->arrayMode().isOutOfBounds()) {
1968         GPRTemporary realResultTag(this);
1969         resultTag.adopt(realResultTag);
1970         resultTagReg = resultTag.gpr();
1971     }
1972 #endif
1973
1974     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
1975
1976     // unsigned comparison so we can filter out negative indices and indices that are too large
1977     JITCompiler::Jump outOfBounds = m_jit.branch32(
1978         MacroAssembler::AboveOrEqual, propertyReg,
1979         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
1980     if (node->arrayMode().isInBounds())
1981         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
1982
1983     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
1984
1985     // Load the character into scratchReg
1986     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1987
1988     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
1989     JITCompiler::Jump cont8Bit = m_jit.jump();
1990
1991     is16Bit.link(&m_jit);
1992
1993     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
1994
1995     JITCompiler::Jump bigCharacter =
1996         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
1997
1998     // 8 bit string values don't need the isASCII check.
1999     cont8Bit.link(&m_jit);
2000
2001     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2002     m_jit.addPtr(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2003     m_jit.loadPtr(scratchReg, scratchReg);
2004
2005     addSlowPathGenerator(
2006         slowPathCall(
2007             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2008
2009     if (node->arrayMode().isOutOfBounds()) {
2010 #if USE(JSVALUE32_64)
2011         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2012 #endif
2013
2014         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2015         bool prototypeChainIsSane = false;
2016         if (globalObject->stringPrototypeChainIsSane()) {
2017             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2018             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2019             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2020             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2021             // indexed properties either.
2022             // https://bugs.webkit.org/show_bug.cgi?id=144668
2023             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2024             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2025             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2026         }
2027         if (prototypeChainIsSane) {
2028             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2029             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2030             
2031 #if USE(JSVALUE64)
2032             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2033                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2034 #else
2035             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2036                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2037                 baseReg, propertyReg));
2038 #endif
2039         } else {
2040 #if USE(JSVALUE64)
2041             addSlowPathGenerator(
2042                 slowPathCall(
2043                     outOfBounds, this, operationGetByValStringInt,
2044                     scratchReg, baseReg, propertyReg));
2045 #else
2046             addSlowPathGenerator(
2047                 slowPathCall(
2048                     outOfBounds, this, operationGetByValStringInt,
2049                     JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2050 #endif
2051         }
2052         
2053 #if USE(JSVALUE64)
2054         jsValueResult(scratchReg, m_currentNode);
2055 #else
2056         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2057 #endif
2058     } else
2059         cellResult(scratchReg, m_currentNode);
2060 }
2061
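// String.fromCharCode: an untyped operand goes straight to operationStringFromCharCodeUntyped.
// For an int32 operand, char codes below 0xff are served from the single-character string
// cache; larger codes, or a cache miss, fall back to operationStringFromCharCode.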
2062 void SpeculativeJIT::compileFromCharCode(Node* node)
2063 {
2064     Edge& child = node->child1();
2065     if (child.useKind() == UntypedUse) {
2066         JSValueOperand opr(this, child);
2067         JSValueRegs oprRegs = opr.jsValueRegs();
2068 #if USE(JSVALUE64)
2069         GPRTemporary result(this);
2070         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2071 #else
2072         GPRTemporary resultTag(this);
2073         GPRTemporary resultPayload(this);
2074         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2075 #endif
2076         flushRegisters();
2077         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2078         m_jit.exceptionCheck();
2079         
2080         jsValueResult(resultRegs, node);
2081         return;
2082     }
2083
2084     SpeculateStrictInt32Operand property(this, child);
2085     GPRReg propertyReg = property.gpr();
2086     GPRTemporary smallStrings(this);
2087     GPRTemporary scratch(this);
2088     GPRReg scratchReg = scratch.gpr();
2089     GPRReg smallStringsReg = smallStrings.gpr();
2090
2091     JITCompiler::JumpList slowCases;
2092     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2093     m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2094     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2095
2096     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2097     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2098     cellResult(scratchReg, m_currentNode);
2099 }
2100
2101 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2102 {
2103     VirtualRegister virtualRegister = node->virtualRegister();
2104     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2105
2106     switch (info.registerFormat()) {
2107     case DataFormatStorage:
2108         RELEASE_ASSERT_NOT_REACHED();
2109
2110     case DataFormatBoolean:
2111     case DataFormatCell:
2112         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2113         return GeneratedOperandTypeUnknown;
2114
2115     case DataFormatNone:
2116     case DataFormatJSCell:
2117     case DataFormatJS:
2118     case DataFormatJSBoolean:
2119     case DataFormatJSDouble:
2120         return GeneratedOperandJSValue;
2121
2122     case DataFormatJSInt32:
2123     case DataFormatInt32:
2124         return GeneratedOperandInteger;
2125
2126     default:
2127         RELEASE_ASSERT_NOT_REACHED();
2128         return GeneratedOperandTypeUnknown;
2129     }
2130 }
2131
2132 void SpeculativeJIT::compileValueToInt32(Node* node)
2133 {
2134     switch (node->child1().useKind()) {
2135 #if USE(JSVALUE64)
2136     case Int52RepUse: {
2137         SpeculateStrictInt52Operand op1(this, node->child1());
2138         GPRTemporary result(this, Reuse, op1);
2139         GPRReg op1GPR = op1.gpr();
2140         GPRReg resultGPR = result.gpr();
2141         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2142         int32Result(resultGPR, node, DataFormatInt32);
2143         return;
2144     }
2145 #endif // USE(JSVALUE64)
2146         
2147     case DoubleRepUse: {
2148         GPRTemporary result(this);
2149         SpeculateDoubleOperand op1(this, node->child1());
2150         FPRReg fpr = op1.fpr();
2151         GPRReg gpr = result.gpr();
2152         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2153         
2154         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2155         
2156         int32Result(gpr, node);
2157         return;
2158     }
2159     
2160     case NumberUse:
2161     case NotCellUse: {
2162         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2163         case GeneratedOperandInteger: {
2164             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2165             GPRTemporary result(this, Reuse, op1);
2166             m_jit.move(op1.gpr(), result.gpr());
2167             int32Result(result.gpr(), node, op1.format());
2168             return;
2169         }
2170         case GeneratedOperandJSValue: {
2171             GPRTemporary result(this);
2172 #if USE(JSVALUE64)
2173             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2174
2175             GPRReg gpr = op1.gpr();
2176             GPRReg resultGpr = result.gpr();
2177             FPRTemporary tempFpr(this);
2178             FPRReg fpr = tempFpr.fpr();
2179
2180             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2181             JITCompiler::JumpList converted;
2182
2183             if (node->child1().useKind() == NumberUse) {
2184                 DFG_TYPE_CHECK(
2185                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2186                     m_jit.branchTest64(
2187                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2188             } else {
2189                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2190                 
2191                 DFG_TYPE_CHECK(
2192                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2193                 
2194                 // It's not a cell: so true turns into 1 and all else turns into 0.
2195                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2196                 converted.append(m_jit.jump());
2197                 
2198                 isNumber.link(&m_jit);
2199             }
2200
2201             // First, if we get here, we have a double encoded as a JSValue.
2202             unboxDouble(gpr, resultGpr, fpr);
2203
2204             silentSpillAllRegisters(resultGpr);
2205             callOperation(operationToInt32, resultGpr, fpr);
2206             silentFillAllRegisters(resultGpr);
2207
2208             converted.append(m_jit.jump());
2209
2210             isInteger.link(&m_jit);
2211             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2212
2213             converted.link(&m_jit);
2214 #else
2215             Node* childNode = node->child1().node();
2216             VirtualRegister virtualRegister = childNode->virtualRegister();
2217             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2218
2219             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2220
2221             GPRReg payloadGPR = op1.payloadGPR();
2222             GPRReg resultGpr = result.gpr();
2223         
2224             JITCompiler::JumpList converted;
2225
2226             if (info.registerFormat() == DataFormatJSInt32)
2227                 m_jit.move(payloadGPR, resultGpr);
2228             else {
2229                 GPRReg tagGPR = op1.tagGPR();
2230                 FPRTemporary tempFpr(this);
2231                 FPRReg fpr = tempFpr.fpr();
2232                 FPRTemporary scratch(this);
2233
2234                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2235
2236                 if (node->child1().useKind() == NumberUse) {
2237                     DFG_TYPE_CHECK(
2238                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2239                         m_jit.branch32(
2240                             MacroAssembler::AboveOrEqual, tagGPR,
2241                             TrustedImm32(JSValue::LowestTag)));
2242                 } else {
2243                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2244                     
2245                     DFG_TYPE_CHECK(
2246                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2247                         m_jit.branchIfCell(op1.jsValueRegs()));
2248                     
2249                     // It's not a cell: so true turns into 1 and all else turns into 0.
2250                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2251                     m_jit.move(TrustedImm32(0), resultGpr);
2252                     converted.append(m_jit.jump());
2253                     
2254                     isBoolean.link(&m_jit);
2255                     m_jit.move(payloadGPR, resultGpr);
2256                     converted.append(m_jit.jump());
2257                     
2258                     isNumber.link(&m_jit);
2259                 }
2260
2261                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2262
2263                 silentSpillAllRegisters(resultGpr);
2264                 callOperation(operationToInt32, resultGpr, fpr);
2265                 silentFillAllRegisters(resultGpr);
2266
2267                 converted.append(m_jit.jump());
2268
2269                 isInteger.link(&m_jit);
2270                 m_jit.move(payloadGPR, resultGpr);
2271
2272                 converted.link(&m_jit);
2273             }
2274 #endif
2275             int32Result(resultGpr, node);
2276             return;
2277         }
2278         case GeneratedOperandTypeUnknown:
2279             RELEASE_ASSERT(!m_compileOkay);
2280             return;
2281         }
2282         RELEASE_ASSERT_NOT_REACHED();
2283         return;
2284     }
2285     
2286     default:
2287         ASSERT(!m_compileOkay);
2288         return;
2289     }
2290 }
2291
2292 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2293 {
2294     if (doesOverflow(node->arithMode())) {
2295         if (enableInt52()) {
2296             SpeculateInt32Operand op1(this, node->child1());
2297             GPRTemporary result(this, Reuse, op1);
2298             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2299             strictInt52Result(result.gpr(), node);
2300             return;
2301         }
2302         SpeculateInt32Operand op1(this, node->child1());
2303         FPRTemporary result(this);
2304             
2305         GPRReg inputGPR = op1.gpr();
2306         FPRReg outputFPR = result.fpr();
2307             
2308         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2309             
2310         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2311         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2312         positive.link(&m_jit);
2313             
2314         doubleResult(outputFPR, node);
2315         return;
2316     }
2317     
2318     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2319
2320     SpeculateInt32Operand op1(this, node->child1());
2321     GPRTemporary result(this);
2322
2323     m_jit.move(op1.gpr(), result.gpr());
2324
2325     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2326
2327     int32Result(result.gpr(), node, op1.format());
2328 }
2329
2330 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2331 {
2332     SpeculateDoubleOperand op1(this, node->child1());
2333     FPRTemporary scratch(this);
2334     GPRTemporary result(this);
2335     
2336     FPRReg valueFPR = op1.fpr();
2337     FPRReg scratchFPR = scratch.fpr();
2338     GPRReg resultGPR = result.gpr();
2339
2340     JITCompiler::JumpList failureCases;
2341     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2342     m_jit.branchConvertDoubleToInt32(
2343         valueFPR, resultGPR, failureCases, scratchFPR,
2344         shouldCheckNegativeZero(node->arithMode()));
2345     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2346
2347     int32Result(resultGPR, node);
2348 }
2349
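// DoubleRep converts its operand to an unboxed double. RealNumberUse speculates a NaN-free
// number, NumberUse speculates any number, and NotCellUse additionally converts the
// remaining primitives: undefined becomes NaN, null and false become 0, and true becomes 1,
// as the constant loads below show.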
2350 void SpeculativeJIT::compileDoubleRep(Node* node)
2351 {
2352     switch (node->child1().useKind()) {
2353     case RealNumberUse: {
2354         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2355         FPRTemporary result(this);
2356         
2357         JSValueRegs op1Regs = op1.jsValueRegs();
2358         FPRReg resultFPR = result.fpr();
2359         
2360 #if USE(JSVALUE64)
2361         GPRTemporary temp(this);
2362         GPRReg tempGPR = temp.gpr();
2363         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2364 #else
2365         FPRTemporary temp(this);
2366         FPRReg tempFPR = temp.fpr();
2367         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2368 #endif
2369         
2370         JITCompiler::Jump done = m_jit.branchDouble(
2371             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2372         
2373         DFG_TYPE_CHECK(
2374             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2375         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2376         
2377         done.link(&m_jit);
2378         
2379         doubleResult(resultFPR, node);
2380         return;
2381     }
2382     
2383     case NotCellUse:
2384     case NumberUse: {
2385         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2386
2387         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2388         if (isInt32Speculation(possibleTypes)) {
2389             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2390             FPRTemporary result(this);
2391             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2392             doubleResult(result.fpr(), node);
2393             return;
2394         }
2395
2396         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2397         FPRTemporary result(this);
2398
2399 #if USE(JSVALUE64)
2400         GPRTemporary temp(this);
2401
2402         GPRReg op1GPR = op1.gpr();
2403         GPRReg tempGPR = temp.gpr();
2404         FPRReg resultFPR = result.fpr();
2405         JITCompiler::JumpList done;
2406
2407         JITCompiler::Jump isInteger = m_jit.branch64(
2408             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2409
2410         if (node->child1().useKind() == NotCellUse) {
2411             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2412             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2413
2414             static const double zero = 0;
2415             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2416
2417             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2418             done.append(isNull);
2419
2420             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2421                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2422
2423             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2424             static const double one = 1;
2425             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2426             done.append(m_jit.jump());
2427             done.append(isFalse);
2428
2429             isUndefined.link(&m_jit);
2430             static const double NaN = PNaN;
2431             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2432             done.append(m_jit.jump());
2433
2434             isNumber.link(&m_jit);
2435         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2436             typeCheck(
2437                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2438                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2439         }
2440
2441         unboxDouble(op1GPR, tempGPR, resultFPR);
2442         done.append(m_jit.jump());
2443     
2444         isInteger.link(&m_jit);
2445         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2446         done.link(&m_jit);
2447 #else // USE(JSVALUE64) -> this is the 32_64 case
2448         FPRTemporary temp(this);
2449     
2450         GPRReg op1TagGPR = op1.tagGPR();
2451         GPRReg op1PayloadGPR = op1.payloadGPR();
2452         FPRReg tempFPR = temp.fpr();
2453         FPRReg resultFPR = result.fpr();
2454         JITCompiler::JumpList done;
2455     
2456         JITCompiler::Jump isInteger = m_jit.branch32(
2457             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2458
2459         if (node->child1().useKind() == NotCellUse) {
2460             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2461             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2462
2463             static const double zero = 0;
2464             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
2465
2466             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2467             done.append(isNull);
2468
2469             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2470
2471             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2472             static const double one = 1;
2473             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
2474             done.append(m_jit.jump());
2475             done.append(isFalse);
2476
2477             isUndefined.link(&m_jit);
2478             static const double NaN = PNaN;
2479             m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
2480             done.append(m_jit.jump());
2481
2482             isNumber.link(&m_jit);
2483         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2484             typeCheck(
2485                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2486                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2487         }
2488
2489         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2490         done.append(m_jit.jump());
2491     
2492         isInteger.link(&m_jit);
2493         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2494         done.link(&m_jit);
2495 #endif // USE(JSVALUE64)
2496     
2497         doubleResult(resultFPR, node);
2498         return;
2499     }
2500         
2501 #if USE(JSVALUE64)
2502     case Int52RepUse: {
2503         SpeculateStrictInt52Operand value(this, node->child1());
2504         FPRTemporary result(this);
2505         
2506         GPRReg valueGPR = value.gpr();
2507         FPRReg resultFPR = result.fpr();
2508
2509         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2510         
2511         doubleResult(resultFPR, node);
2512         return;
2513     }
2514 #endif // USE(JSVALUE64)
2515         
2516     default:
2517         RELEASE_ASSERT_NOT_REACHED();
2518         return;
2519     }
2520 }
2521
2522 void SpeculativeJIT::compileValueRep(Node* node)
2523 {
2524     switch (node->child1().useKind()) {
2525     case DoubleRepUse: {
2526         SpeculateDoubleOperand value(this, node->child1());
2527         JSValueRegsTemporary result(this);
2528         
2529         FPRReg valueFPR = value.fpr();
2530         JSValueRegs resultRegs = result.regs();
2531         
2532         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2533         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2534         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2535         // local was purified.
2536         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2537             m_jit.purifyNaN(valueFPR);
2538
2539         boxDouble(valueFPR, resultRegs);
2540         
2541         jsValueResult(resultRegs, node);
2542         return;
2543     }
2544         
2545 #if USE(JSVALUE64)
2546     case Int52RepUse: {
2547         SpeculateStrictInt52Operand value(this, node->child1());
2548         GPRTemporary result(this);
2549         
2550         GPRReg valueGPR = value.gpr();
2551         GPRReg resultGPR = result.gpr();
2552         
2553         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2554         
2555         jsValueResult(resultGPR, node);
2556         return;
2557     }
2558 #endif // USE(JSVALUE64)
2559         
2560     default:
2561         RELEASE_ASSERT_NOT_REACHED();
2562         return;
2563     }
2564 }
2565
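// Helpers for clamped byte stores (Uint8ClampedArray): values are clamped to [0, 255];
// doubles are rounded by adding 0.5 before truncation, and NaN is treated as 0 via the
// unordered compare in compileClampDoubleToByte.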
2566 static double clampDoubleToByte(double d)
2567 {
2568     d += 0.5;
2569     if (!(d > 0))
2570         d = 0;
2571     else if (d > 255)
2572         d = 255;
2573     return d;
2574 }
2575
2576 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2577 {
2578     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2579     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2580     jit.xorPtr(result, result);
2581     MacroAssembler::Jump clamped = jit.jump();
2582     tooBig.link(&jit);
2583     jit.move(JITCompiler::TrustedImm32(255), result);
2584     clamped.link(&jit);
2585     inBounds.link(&jit);
2586 }
2587
2588 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2589 {
2590     // Unordered compare so we pick up NaN
2591     static const double zero = 0;
2592     static const double byteMax = 255;
2593     static const double half = 0.5;
2594     jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), scratch);
2595     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2596     jit.loadDouble(MacroAssembler::TrustedImmPtr(&byteMax), scratch);
2597     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2598     
2599     jit.loadDouble(MacroAssembler::TrustedImmPtr(&half), scratch);
2600     // FIXME: This should probably just use a floating point round!
2601     // https://bugs.webkit.org/show_bug.cgi?id=72054
2602     jit.addDouble(source, scratch);
2603     jit.truncateDoubleToInt32(scratch, result);   
2604     MacroAssembler::Jump truncatedInt = jit.jump();
2605     
2606     tooSmall.link(&jit);
2607     jit.xorPtr(result, result);
2608     MacroAssembler::Jump zeroed = jit.jump();
2609     
2610     tooBig.link(&jit);
2611     jit.move(JITCompiler::TrustedImm32(255), result);
2612     
2613     truncatedInt.link(&jit);
2614     zeroed.link(&jit);
2615
2616 }
2617
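// Typed-array bounds checks: PutByValAlias never needs one. If the base folds to a known
// JSArrayBufferView and the index is an int32 constant already within the view's length,
// no check is emitted either; otherwise the index is compared unsigned against the length.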
2618 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2619 {
2620     if (node->op() == PutByValAlias)
2621         return JITCompiler::Jump();
2622     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2623         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2624     if (view) {
2625         uint32_t length = view->length();
2626         Node* indexNode = m_jit.graph().child(node, 1).node();
2627         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2628             return JITCompiler::Jump();
2629         return m_jit.branch32(
2630             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2631     }
2632     return m_jit.branch32(
2633         MacroAssembler::AboveOrEqual, indexGPR,
2634         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2635 }
2636
2637 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2638 {
2639     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2640     if (!jump.isSet())
2641         return;
2642     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2643 }
2644
2645 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2646 {
2647     JITCompiler::Jump done;
2648     if (outOfBounds.isSet()) {
2649         done = m_jit.jump();
2650         if (node->arrayMode().isInBounds())
2651             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2652         else {
2653             outOfBounds.link(&m_jit);
2654
2655             JITCompiler::Jump notWasteful = m_jit.branch32(
2656                 MacroAssembler::NotEqual,
2657                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2658                 TrustedImm32(WastefulTypedArray));
2659
2660             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2661                 MacroAssembler::Zero,
2662                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2663             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2664             notWasteful.link(&m_jit);
2665         }
2666     }
2667     return done;
2668 }
2669
2670 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2671 {
2672     ASSERT(isInt(type));
2673     
2674     SpeculateCellOperand base(this, node->child1());
2675     SpeculateStrictInt32Operand property(this, node->child2());
2676     StorageOperand storage(this, node->child3());
2677
2678     GPRReg baseReg = base.gpr();
2679     GPRReg propertyReg = property.gpr();
2680     GPRReg storageReg = storage.gpr();
2681
2682     GPRTemporary result(this);
2683     GPRReg resultReg = result.gpr();
2684
2685     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2686
2687     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2688     switch (elementSize(type)) {
2689     case 1:
2690         if (isSigned(type))
2691             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2692         else
2693             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2694         break;
2695     case 2:
2696         if (isSigned(type))
2697             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2698         else
2699             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2700         break;
2701     case 4:
2702         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2703         break;
2704     default:
2705         CRASH();
2706     }
2707     if (elementSize(type) < 4 || isSigned(type)) {
2708         int32Result(resultReg, node);
2709         return;
2710     }
2711     
2712     ASSERT(elementSize(type) == 4 && !isSigned(type));
2713     if (node->shouldSpeculateInt32()) {
2714         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2715         int32Result(resultReg, node);
2716         return;
2717     }
2718     
2719 #if USE(JSVALUE64)
2720     if (node->shouldSpeculateAnyInt()) {
2721         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2722         strictInt52Result(resultReg, node);
2723         return;
2724     }
2725 #endif
2726     
2727     FPRTemporary fresult(this);
2728     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2729     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2730     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2731     positive.link(&m_jit);
2732     doubleResult(fresult.fpr(), node);
2733 }
2734
2735 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2736 {
2737     ASSERT(isInt(type));
2738     
2739     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2740     GPRReg storageReg = storage.gpr();
2741     
2742     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2743     
2744     GPRTemporary value;
2745 #if USE(JSVALUE32_64)
2746     GPRTemporary propertyTag;
2747     GPRTemporary valueTag;
2748 #endif
2749
2750     GPRReg valueGPR = InvalidGPRReg;
2751 #if USE(JSVALUE32_64)
2752     GPRReg propertyTagGPR = InvalidGPRReg;
2753     GPRReg valueTagGPR = InvalidGPRReg;
2754 #endif
2755
2756     JITCompiler::JumpList slowPathCases;
2757
2758     if (valueUse->isConstant()) {
2759         JSValue jsValue = valueUse->asJSValue();
2760         if (!jsValue.isNumber()) {
2761             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2762             noResult(node);
2763             return;
2764         }
2765         double d = jsValue.asNumber();
2766         if (isClamped(type)) {
2767             ASSERT(elementSize(type) == 1);
2768             d = clampDoubleToByte(d);
2769         }
2770         GPRTemporary scratch(this);
2771         GPRReg scratchReg = scratch.gpr();
2772         m_jit.move(Imm32(toInt32(d)), scratchReg);
2773         value.adopt(scratch);
2774         valueGPR = scratchReg;
2775     } else {
2776         switch (valueUse.useKind()) {
2777         case Int32Use: {
2778             SpeculateInt32Operand valueOp(this, valueUse);
2779             GPRTemporary scratch(this);
2780             GPRReg scratchReg = scratch.gpr();
2781             m_jit.move(valueOp.gpr(), scratchReg);
2782             if (isClamped(type)) {
2783                 ASSERT(elementSize(type) == 1);
2784                 compileClampIntegerToByte(m_jit, scratchReg);
2785             }
2786             value.adopt(scratch);
2787             valueGPR = scratchReg;
2788             break;
2789         }
2790             
2791 #if USE(JSVALUE64)
2792         case Int52RepUse: {
2793             SpeculateStrictInt52Operand valueOp(this, valueUse);
2794             GPRTemporary scratch(this);
2795             GPRReg scratchReg = scratch.gpr();
2796             m_jit.move(valueOp.gpr(), scratchReg);
2797             if (isClamped(type)) {
2798                 ASSERT(elementSize(type) == 1);
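                // Clamp the Int52 value to [0, 255] for a clamped byte array: values that
                // are <= 0xff as an unsigned compare pass through, larger positive values
                // become 255, and negative values fall through to 0 (e.g. storing 300 into
                // a Uint8ClampedArray element yields 255, storing -5 yields 0).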
2799                 MacroAssembler::Jump inBounds = m_jit.branch64(
2800                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2801                 MacroAssembler::Jump tooBig = m_jit.branch64(
2802                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2803                 m_jit.move(TrustedImm32(0), scratchReg);
2804                 MacroAssembler::Jump clamped = m_jit.jump();
2805                 tooBig.link(&m_jit);
2806                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2807                 clamped.link(&m_jit);
2808                 inBounds.link(&m_jit);
2809             }
2810             value.adopt(scratch);
2811             valueGPR = scratchReg;
2812             break;
2813         }
2814 #endif // USE(JSVALUE64)
2815             
2816         case DoubleRepUse: {
2817             if (isClamped(type)) {
2818                 ASSERT(elementSize(type) == 1);
2819                 SpeculateDoubleOperand valueOp(this, valueUse);
2820                 GPRTemporary result(this);
2821                 FPRTemporary floatScratch(this);
2822                 FPRReg fpr = valueOp.fpr();
2823                 GPRReg gpr = result.gpr();
2824                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2825                 value.adopt(result);
2826                 valueGPR = gpr;
2827             } else {
2828 #if USE(JSVALUE32_64)
2829                 GPRTemporary realPropertyTag(this);
2830                 propertyTag.adopt(realPropertyTag);
2831                 propertyTagGPR = propertyTag.gpr();
2832
2833                 GPRTemporary realValueTag(this);
2834                 valueTag.adopt(realValueTag);
2835                 valueTagGPR = valueTag.gpr();
2836 #endif
2837                 SpeculateDoubleOperand valueOp(this, valueUse);
2838                 GPRTemporary result(this);
2839                 FPRReg fpr = valueOp.fpr();
2840                 GPRReg gpr = result.gpr();
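                // Convert the double to an int32 for the store: NaN stores 0, a double that
                // truncates to int32 takes the inline path, and anything else is boxed and
                // handed to the generic put-by-val slow path appended below.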
2841                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2842                 m_jit.xorPtr(gpr, gpr);
2843                 MacroAssembler::JumpList fixed(m_jit.jump());
2844                 notNaN.link(&m_jit);
2845
2846                 fixed.append(m_jit.branchTruncateDoubleToInt32(
2847                     fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
2848
2849 #if USE(JSVALUE64)
2850                 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
2851                 boxDouble(fpr, gpr);
2852 #else
2853                 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
2854                 boxDouble(fpr, valueTagGPR, gpr);
2855 #endif
2856                 slowPathCases.append(m_jit.jump());
2857
2858                 fixed.link(&m_jit);
2859                 value.adopt(result);
2860                 valueGPR = gpr;
2861             }
2862             break;
2863         }
2864             
2865         default:
2866             RELEASE_ASSERT_NOT_REACHED();
2867             break;
2868         }
2869     }
2870     
2871     ASSERT_UNUSED(valueGPR, valueGPR != property);
2872     ASSERT(valueGPR != base);
2873     ASSERT(valueGPR != storageReg);
2874     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2875
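    // An out-of-bounds index skips the store entirely; the helper called after the store
    // additionally handles the case where the view has been neutered while out of bounds.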
2876     switch (elementSize(type)) {
2877     case 1:
2878         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2879         break;
2880     case 2:
2881         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2882         break;
2883     case 4:
2884         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2885         break;
2886     default:
2887         CRASH();
2888     }
2889
2890     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2891     if (done.isSet())
2892         done.link(&m_jit);
2893
2894     if (!slowPathCases.empty()) {
2895 #if USE(JSVALUE64)
2896         if (node->op() == PutByValDirect) {
2897             addSlowPathGenerator(slowPathCall(
2898                 slowPathCases, this,
2899                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
2900                 NoResult, base, property, valueGPR));
2901         } else {
2902             addSlowPathGenerator(slowPathCall(
2903                 slowPathCases, this,
2904                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
2905                 NoResult, base, property, valueGPR));
2906         }
2907 #else // not USE(JSVALUE64)
2908         if (node->op() == PutByValDirect) {
2909             addSlowPathGenerator(slowPathCall(
2910                 slowPathCases, this,
2911                 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
2912                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
2913         } else {
2914             addSlowPathGenerator(slowPathCall(
2915                 slowPathCases, this,
2916                 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
2917                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
2918         }
2919 #endif
2920     }
2921     noResult(node);
2922 }
2923
2924 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
2925 {
2926     ASSERT(isFloat(type));
2927     
2928     SpeculateCellOperand base(this, node->child1());
2929     SpeculateStrictInt32Operand property(this, node->child2());
2930     StorageOperand storage(this, node->child3());
2931
2932     GPRReg baseReg = base.gpr();
2933     GPRReg propertyReg = property.gpr();
2934     GPRReg storageReg = storage.gpr();
2935
2936     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2937
2938     FPRTemporary result(this);
2939     FPRReg resultReg = result.fpr();
2940     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
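    // Float32 elements are loaded and widened to double; either way the node produces a
    // double result.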
2941     switch (elementSize(type)) {
2942     case 4:
2943         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2944         m_jit.convertFloatToDouble(resultReg, resultReg);
2945         break;
2946     case 8: {
2947         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2948         break;
2949     }
2950     default:
2951         RELEASE_ASSERT_NOT_REACHED();
2952     }
2953     
2954     doubleResult(resultReg, node);
2955 }
2956
2957 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2958 {
2959     ASSERT(isFloat(type));
2960     
2961     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2962     GPRReg storageReg = storage.gpr();
2963     
2964     Edge baseUse = m_jit.graph().varArgChild(node, 0);
2965     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2966
2967     SpeculateDoubleOperand valueOp(this, valueUse);
2968     FPRTemporary scratch(this);
2969     FPRReg valueFPR = valueOp.fpr();
2970     FPRReg scratchFPR = scratch.fpr();
2971
2972     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
2973     
2974     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2975     
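    // Float32 stores narrow the incoming double to a float in the scratch FPR; Float64
    // stores write the double as-is.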
2976     switch (elementSize(type)) {
2977     case 4: {
2978         m_jit.moveDouble(valueFPR, scratchFPR);
2979         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
2980         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2981         break;
2982     }
2983     case 8:
2984         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
2985         break;
2986     default:
2987         RELEASE_ASSERT_NOT_REACHED();
2988     }
2989
2990     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2991     if (done.isSet())
2992         done.link(&m_jit);
2993     noResult(node);
2994 }
2995
2996 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
2997 {
2998     // Check that prototype is an object.
2999     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3000     
3001     // Initialize scratchReg with the value being checked.
3002     m_jit.move(valueReg, scratchReg);
3003     
3004     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
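    // Roughly the default [[HasInstance]] loop, sketched in JS terms:
    //     for (let p = getPrototypeOf(value); p !== null; p = getPrototypeOf(p)) { if (p === prototype) return true; }
    //     return false;
    // Proxy objects encountered in the chain bail out to operationDefaultHasInstance below.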
3005     MacroAssembler::Label loop(&m_jit);
3006     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3007         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3008     m_jit.emitLoadStructure(scratchReg, scratchReg, scratch2Reg);
3009     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
3010     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3011 #if USE(JSVALUE64)
3012     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3013 #else
3014     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3015 #endif
3016     
3017     // No match - result is false.
3018 #if USE(JSVALUE64)
3019     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
3020 #else
3021     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
3022 #endif
3023     MacroAssembler::JumpList doneJumps; 
3024     doneJumps.append(m_jit.jump());
3025
3026     performDefaultHasInstance.link(&m_jit);
3027     silentSpillAllRegisters(scratchReg);
3028     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
3029     silentFillAllRegisters(scratchReg);
3030     m_jit.exceptionCheck();
3031 #if USE(JSVALUE64)
3032     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
3033 #endif
3034     doneJumps.append(m_jit.jump());
3035     
3036     isInstance.link(&m_jit);
3037 #if USE(JSVALUE64)
3038     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
3039 #else
3040     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
3041 #endif
3042     
3043     doneJumps.link(&m_jit);
3044 }
3045
3046 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
3047 {
3048     SpeculateCellOperand base(this, node->child1());
3049
3050     GPRReg baseGPR = base.gpr();
3051
3052     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
3053
3054     noResult(node);
3055 }
3056
3057 void SpeculativeJIT::compileInstanceOf(Node* node)
3058 {
3059     if (node->child1().useKind() == UntypedUse) {
3060         // It might not be a cell. Speculate less aggressively.
3061         // Or: it might only be used once (i.e. by us), so we get zero benefit
3062         // from speculating any more aggressively than we absolutely need to.
3063         
3064         JSValueOperand value(this, node->child1());
3065         SpeculateCellOperand prototype(this, node->child2());
3066         GPRTemporary scratch(this);
3067         GPRTemporary scratch2(this);
3068         
3069         GPRReg prototypeReg = prototype.gpr();
3070         GPRReg scratchReg = scratch.gpr();
3071         GPRReg scratch2Reg = scratch2.gpr();
3072         
3073         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
3074         GPRReg valueReg = value.jsValueRegs().payloadGPR();
3075         moveFalseTo(scratchReg);
3076
3077         MacroAssembler::Jump done = m_jit.jump();
3078         
3079         isCell.link(&m_jit);
3080         
3081         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3082         
3083         done.link(&m_jit);
3084
3085         blessedBooleanResult(scratchReg, node);
3086         return;
3087     }
3088     
3089     SpeculateCellOperand value(this, node->child1());
3090     SpeculateCellOperand prototype(this, node->child2());
3091     
3092     GPRTemporary scratch(this);
3093     GPRTemporary scratch2(this);
3094     
3095     GPRReg valueReg = value.gpr();
3096     GPRReg prototypeReg = prototype.gpr();
3097     GPRReg scratchReg = scratch.gpr();
3098     GPRReg scratch2Reg = scratch2.gpr();
3099     
3100     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3101
3102     blessedBooleanResult(scratchReg, node);
3103 }
3104
3105 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3106 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3107 {
3108     Edge& leftChild = node->child1();
3109     Edge& rightChild = node->child2();
3110
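    // If either operand is known not to be a number, the snippet fast path cannot succeed,
    // so call the slow-path operation directly; otherwise emit the inline snippet and take
    // the operation call only on its slow-path jumps.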
3111     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3112         JSValueOperand left(this, leftChild);
3113         JSValueOperand right(this, rightChild);
3114         JSValueRegs leftRegs = left.jsValueRegs();
3115         JSValueRegs rightRegs = right.jsValueRegs();
3116 #if USE(JSVALUE64)
3117         GPRTemporary result(this);
3118         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3119 #else
3120         GPRTemporary resultTag(this);
3121         GPRTemporary resultPayload(this);
3122         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3123 #endif
3124         flushRegisters();
3125         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3126         m_jit.exceptionCheck();
3127
3128         jsValueResult(resultRegs, node);
3129         return;
3130     }
3131
3132     Optional<JSValueOperand> left;
3133     Optional<JSValueOperand> right;
3134
3135     JSValueRegs leftRegs;
3136     JSValueRegs rightRegs;
3137
3138 #if USE(JSVALUE64)
3139     GPRTemporary result(this);
3140     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3141     GPRTemporary scratch(this);
3142     GPRReg scratchGPR = scratch.gpr();
3143 #else
3144     GPRTemporary resultTag(this);
3145     GPRTemporary resultPayload(this);
3146     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3147     GPRReg scratchGPR = resultTag.gpr();
3148 #endif
3149
3150     SnippetOperand leftOperand;
3151     SnippetOperand rightOperand;
3152
3153     // The snippet generator does not support both operands being constant. If the left
3154     // operand is already const, we'll ignore the right operand's constness.
3155     if (leftChild->isInt32Constant())
3156         leftOperand.setConstInt32(leftChild->asInt32());
3157     else if (rightChild->isInt32Constant())
3158         rightOperand.setConstInt32(rightChild->asInt32());
3159
3160     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3161
3162     if (!leftOperand.isConst()) {
3163         left = JSValueOperand(this, leftChild);
3164         leftRegs = left->jsValueRegs();
3165     }
3166     if (!rightOperand.isConst()) {
3167         right = JSValueOperand(this, rightChild);
3168         rightRegs = right->jsValueRegs();
3169     }
3170
3171     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3172     gen.generateFastPath(m_jit);
3173
3174     ASSERT(gen.didEmitFastPath());
3175     gen.endJumpList().append(m_jit.jump());
3176
3177     gen.slowPathJumpList().link(&m_jit);
3178     silentSpillAllRegisters(resultRegs);
3179
3180     if (leftOperand.isConst()) {
3181         leftRegs = resultRegs;
3182         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3183     } else if (rightOperand.isConst()) {
3184         rightRegs = resultRegs;
3185         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3186     }
3187
3188     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3189
3190     silentFillAllRegisters(resultRegs);
3191     m_jit.exceptionCheck();
3192
3193     gen.endJumpList().link(&m_jit);
3194     jsValueResult(resultRegs, node);
3195 }
3196
3197 void SpeculativeJIT::compileBitwiseOp(Node* node)
3198 {
3199     NodeType op = node->op();
3200     Edge& leftChild = node->child1();
3201     Edge& rightChild = node->child2();
3202
3203     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3204         switch (op) {
3205         case BitAnd:
3206             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3207             return;
3208         case BitOr:
3209             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3210             return;
3211         case BitXor:
3212             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3213             return;
3214         default:
3215             RELEASE_ASSERT_NOT_REACHED();
3216         }
3217     }
3218
3219     if (leftChild->isInt32Constant()) {
3220         SpeculateInt32Operand op2(this, rightChild);
3221         GPRTemporary result(this, Reuse, op2);
3222
3223         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3224
3225         int32Result(result.gpr(), node);
3226
3227     } else if (rightChild->isInt32Constant()) {
3228         SpeculateInt32Operand op1(this, leftChild);
3229         GPRTemporary result(this, Reuse, op1);
3230
3231         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3232
3233         int32Result(result.gpr(), node);
3234
3235     } else {
3236         SpeculateInt32Operand op1(this, leftChild);
3237         SpeculateInt32Operand op2(this, rightChild);
3238         GPRTemporary result(this, Reuse, op1, op2);
3239         
3240         GPRReg reg1 = op1.gpr();
3241         GPRReg reg2 = op2.gpr();
3242         bitOp(op, reg1, reg2, result.gpr());
3243         
3244         int32Result(result.gpr(), node);
3245     }
3246 }
3247
3248 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3249 {
3250     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3251         ? operationValueBitRShift : operationValueBitURShift;
3252     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3253         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3254
3255     Edge& leftChild = node->child1();
3256     Edge& rightChild = node->child2();
3257
3258     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3259         JSValueOperand left(this, leftChild);
3260         JSValueOperand right(this, rightChild);
3261         JSValueRegs leftRegs = left.jsValueRegs();
3262         JSValueRegs rightRegs = right.jsValueRegs();
3263 #if USE(JSVALUE64)
3264         GPRTemporary result(this);
3265         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3266 #else
3267         GPRTemporary resultTag(this);
3268         GPRTemporary resultPayload(this);
3269         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3270 #endif
3271         flushRegisters();
3272         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3273         m_jit.exceptionCheck();
3274
3275         jsValueResult(resultRegs, node);
3276         return;
3277     }
3278
3279     Optional<JSValueOperand> left;
3280     Optional<JSValueOperand> right;
3281
3282     JSValueRegs leftRegs;
3283     JSValueRegs rightRegs;
3284
3285     FPRTemporary leftNumber(this);
3286     FPRReg leftFPR = leftNumber.fpr();
3287
3288 #if USE(JSVALUE64)
3289     GPRTemporary result(this);
3290     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3291     GPRTemporary scratch(this);
3292     GPRReg scratchGPR = scratch.gpr();
3293     FPRReg scratchFPR = InvalidFPRReg;
3294 #else
3295     GPRTemporary resultTag(this);
3296     GPRTemporary resultPayload(this);
3297     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3298     GPRReg scratchGPR = resultTag.gpr();
3299     FPRTemporary fprScratch(this);
3300     FPRReg scratchFPR = fprScratch.fpr();
3301 #endif
3302
3303     SnippetOperand leftOperand;
3304     SnippetOperand rightOperand;
3305
3306     // The snippet generator does not support both operands being constant. If the left
3307     // operand is already const, we'll ignore the right operand's constness.
3308     if (leftChild->isInt32Constant())
3309         leftOperand.setConstInt32(leftChild->asInt32());
3310     else if (rightChild->isInt32Constant())
3311         rightOperand.setConstInt32(rightChild->asInt32());
3312
3313     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3314
3315     if (!leftOperand.isConst()) {
3316         left = JSValueOperand(this, leftChild);
3317         leftRegs = left->jsValueRegs();
3318     }
3319     if (!rightOperand.isConst()) {
3320         right = JSValueOperand(this, rightChild);
3321         rightRegs = right->jsValueRegs();
3322     }
3323
3324     JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
3325         leftFPR, scratchGPR, scratchFPR, shiftType);
3326     gen.generateFastPath(m_jit);
3327
3328     ASSERT(gen.didEmitFastPath());
3329     gen.endJumpList().append(m_jit.jump());
3330
3331     gen.slowPathJumpList().link(&m_jit);
3332     silentSpillAllRegisters(resultRegs);
3333
3334     if (leftOperand.isConst()) {
3335         leftRegs = resultRegs;
3336         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3337     } else if (rightOperand.isConst()) {
3338         rightRegs = resultRegs;
3339         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3340     }
3341
3342     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3343
3344     silentFillAllRegisters(resultRegs);
3345     m_jit.exceptionCheck();
3346
3347     gen.endJumpList().link(&m_jit);
3348     jsValueResult(resultRegs, node);
3349     return;
3350 }
3351
3352 void SpeculativeJIT::compileShiftOp(Node* node)
3353 {
3354     NodeType op = node->op();
3355     Edge& leftChild = node->child1();
3356     Edge& rightChild = node->child2();
3357
3358     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3359         switch (op) {
3360         case BitLShift:
3361             emitUntypedBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
3362             return;
3363         case BitRShift:
3364         case BitURShift:
3365             emitUntypedRightShiftBitOp(node);
3366             return;
3367         default:
3368             RELEASE_ASSERT_NOT_REACHED();
3369         }
3370     }
3371
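    // JS shift amounts are taken modulo 32 (e.g. 1 << 33 === 2), hence masking the
    // constant shift amount with 0x1f.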
3372     if (rightChild->isInt32Constant()) {
3373         SpeculateInt32Operand op1(this, leftChild);
3374         GPRTemporary result(this, Reuse, op1);
3375
3376         shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());
3377
3378         int32Result(result.gpr(), node);
3379     } else {
3380         // Do not allow the shift amount to be used as the result; MacroAssembler does not permit this.
3381         SpeculateInt32Operand op1(this, leftChild);
3382         SpeculateInt32Operand op2(this, rightChild);
3383         GPRTemporary result(this, Reuse, op1);
3384
3385         GPRReg reg1 = op1.gpr();
3386         GPRReg reg2 = op2.gpr();
3387         shiftOp(op, reg1, reg2, result.gpr());
3388
3389         int32Result(result.gpr(), node);
3390     }
3391 }
3392
3393 void SpeculativeJIT::compileValueAdd(Node* node)
3394 {
3395     Edge& leftChild = node->child1();
3396     Edge& rightChild = node->child2();
3397
3398     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3399         JSValueOperand left(this, leftChild);
3400         JSValueOperand right(this, rightChild);
3401         JSValueRegs leftRegs = left.jsValueRegs();
3402         JSValueRegs rightRegs = right.jsValueRegs();
3403 #if USE(JSVALUE64)
3404         GPRTemporary result(this);
3405         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3406 #else
3407         GPRTemporary resultTag(this);
3408         GPRTemporary resultPayload(this);
3409         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3410 #endif
3411         flushRegisters();
3412         callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
3413         m_jit.exceptionCheck();
3414     
3415         jsValueResult(resultRegs, node);
3416         return;
3417     }
3418
3419 #if USE(JSVALUE64)
3420     bool needsScratchGPRReg = true;
3421     bool needsScratchFPRReg = false;
3422 #else
3423     bool needsScratchGPRReg = true;
3424     bool needsScratchFPRReg = true;
3425 #endif
3426
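    // Otherwise compile through a JITAddIC seeded with the baseline ArithProfile: the
    // inline fast path is emitted by compileMathIC, and the slow path calls either
    // operationValueAddOptimize (which can repatch the IC) or plain operationValueAdd.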
3427     ArithProfile* arithProfile = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex);
3428     JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile);
3429     auto repatchingFunction = operationValueAddOptimize;
3430     auto nonRepatchingFunction = operationValueAdd;
3431     
3432     compileMathIC(node, addIC, needsScratchGPRReg, needsScratchFPRReg, repatchingFunction, nonRepatchingFunction);
3433 }
3434
3435 template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
3436 void SpeculativeJIT::compileMathIC(Node* node, JITBinaryMathIC<Generator>* mathIC, bool needsScratchGPRReg, bool needsScratchFPRReg, RepatchingFunction repatchingFunction, NonRepatchingFunction nonRepatchingFunction)
3437 {
3438     Edge& leftChild = node->child1();
3439     Edge& rightChild = node->child2();
3440
3441     Optional<JSValueOperand> left;
3442     Optional<JSValueOperand> right;
3443
3444     JSValueRegs leftRegs;
3445     JSValueRegs rightRegs;
3446
3447     FPRTemporary leftNumber(this);
3448     FPRTemporary rightNumber(this);
3449     FPRReg leftFPR = leftNumber.fpr();
3450     FPRReg rightFPR = rightNumber.fpr();
3451
3452     GPRReg scratchGPR = InvalidGPRReg;
3453     FPRReg scratchFPR = InvalidFPRReg;
3454
3455     Optional<FPRTemporary> fprScratch;
3456     if (needsScratchFPRReg) {
3457         fprScratch = FPRTemporary(this);
3458         scratchFPR = fprScratch->fpr();
3459     }
3460
3461 #if USE(JSVALUE64)
3462     Optional<GPRTemporary> gprScratch;
3463     if (needsScratchGPRReg) {
3464         gprScratch = GPRTemporary(this);
3465         scratchGPR = gprScratch->gpr();
3466     }
3467     GPRTemporary result(this);
3468     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3469 #else
3470     GPRTemporary resultTag(this);
3471     GPRTemporary resultPayload(this);
3472     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3473     if (needsScratchGPRReg)
3474         scratchGPR = resultRegs.tagGPR();
3475 #endif
3476
3477     SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
3478     SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
3479
3480     // The snippet generator does not support both operands being constant. If the left
3481     // operand is already const, we'll ignore the right operand's constness.
3482     if (leftChild->isInt32Constant())
3483         leftOperand.setConstInt32(leftChild->asInt32());
3484     else if (rightChild->isInt32Constant())
3485         rightOperand.setConstInt32(rightChild->asInt32());
3486
3487     ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3488     ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));
3489
3490     if (!Generator::isLeftOperandValidConstant(leftOperand)) {
3491         left = JSValueOperand(this, leftChild);
3492         leftRegs = left->jsValueRegs();
3493     }
3494     if (!Generator::isRightOperandValidConstant(rightOperand)) {
3495         right = JSValueOperand(this, rightChild);
3496         rightRegs = right->jsValueRegs();
3497     }
3498
3499 #if ENABLE(MATH_IC_STATS)
3500     auto inlineStart = m_jit.label();
3501 #endif
3502
3503     Box<MathICGenerationState> addICGenerationState = Box<MathICGenerationState>::create();
3504     mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, leftFPR, rightFPR, scratchGPR, scratchFPR);
3505
3506     bool shouldEmitProfiling = false;
3507     bool generatedInline = mathIC->generateInline(m_jit, *addICGenerationState, shouldEmitProfiling);
3508
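    // If the IC generated an inline fast path, link its slow-path jumps to an out-of-line
    // stub that spills registers and calls the repatching or non-repatching operation;
    // otherwise flush and call the non-repatching operation directly.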
3509     if (generatedInline) {
3510         ASSERT(!addICGenerationState->slowPathJumps.empty());
3511
3512         Vector<SilentRegisterSavePlan> savePlans;
3513         silentSpillAllRegistersImpl(false, savePlans, resultRegs);
3514
3515         auto done = m_jit.label();
3516
3517         addSlowPathGenerator([=, savePlans = WTFMove(savePlans)] () {
3518             addICGenerationState->slowPathJumps.link(&m_jit);
3519             addICGenerationState->slowPathStart = m_jit.label();
3520 #if ENABLE(MATH_IC_STATS)
3521             auto slowPathStart = m_jit.label();
3522 #endif
3523
3524             silentSpill(savePlans);
3525
3526             auto innerLeftRegs = leftRegs;
3527             auto innerRightRegs = rightRegs;
3528             if (Generator::isLeftOperandValidConstant(leftOperand)) {
3529                 innerLeftRegs = resultRegs;
3530                 m_jit.moveValue(leftChild->asJSValue(), innerLeftRegs);
3531             } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3532                 innerRightRegs = resultRegs;
3533                 m_jit.moveValue(rightChild->asJSValue(), innerRightRegs);
3534             }
3535
3536             if (addICGenerationState->shouldSlowPathRepatch)
3537                 addICGenerationState->slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJMic>(repatchingFunction), resultRegs, innerLeftRegs, innerRightRegs, TrustedImmPtr(mathIC));
3538             else
3539                 addICGenerationState->slowPathCall = callOperation(nonRepatchingFunction, resultRegs, innerLeftRegs, innerRightRegs);
3540
3541             silentFill(savePlans);
3542             m_jit.exceptionCheck();
3543             m_jit.jump().linkTo(done, &m_jit);
3544
3545             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3546                 mathIC->finalizeInlineCode(*addICGenerationState, linkBuffer);
3547             });
3548
3549 #if ENABLE(MATH_IC_STATS)
3550             auto slowPathEnd = m_jit.label();
3551             m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3552                 size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
3553                 mathIC->m_generatedCodeSize += size;
3554             });
3555 #endif
3556
3557         });
3558     } else {
3559         if (Generator::isLeftOperandValidConstant(leftOperand)) {
3560             left = JSValueOperand(this, leftChild);
3561             leftRegs = left->jsValueRegs();
3562         } else if (Generator::isRightOperandValidConstant(rightOperand)) {
3563             right = JSValueOperand(this, rightChild);
3564             rightRegs = right->jsValueRegs();
3565         }
3566
3567         flushRegisters();
3568         callOperation(nonRepatchingFunction, resultRegs, leftRegs, rightRegs);
3569         m_jit.exceptionCheck();
3570     }
3571
3572 #if ENABLE(MATH_IC_STATS)
3573     auto inlineEnd = m_jit.label();
3574     m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
3575         size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
3576         mathIC->m_generatedCodeSize += size;
3577     });
3578 #endif
3579
3580     jsValueResult(resultRegs, node);
3581     return;
3582 }
3583
3584 void SpeculativeJIT::compileInstanceOfCustom(Node* node)
3585 {
3586     // We could do something smarter here, but this case is currently super rare and,
3587     // unless Symbol.hasInstance becomes popular, it will likely remain that way.
3588
3589     JSValueOperand value(this, node->child1());
3590     SpeculateCellOperand constructor(this, node->child2());
3591     JSValueOperand hasInstanceValue(this, node->child3());
3592     GPRTemporary result(this);
3593
3594     JSValueRegs valueRegs = value.jsValueRegs();
3595     GPRReg constructorGPR = constructor.gpr();
3596     JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
3597     GPRReg resultGPR = result.gpr();
3598
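    // Everything goes through the slow path: the unconditional jump below routes straight
    // to operationInstanceOfCustom, which computes the whole result.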
3599     MacroAssembler::Jump slowCase = m_jit.jump();
3600
3601     addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, valueRegs, constructorGPR, hasInstanceRegs));
3602
3603     unblessedBooleanResult(resultGPR, node);
3604 }
3605
3606 void SpeculativeJIT::compileIsCellWithType(Node* node)
3607 {
3608     switch (node->child1().useKind()) {
3609     case UntypedUse: {
3610         JSValueOperand value(this, node->child1());
3611 #if USE(JSVALUE64)
3612         GPRTemporary result(this, Reuse, value);
3613 #else
3614         GPRTemporary result(this, Reuse, value, PayloadWord);
3615 #endif
3616
3617         JSValueRegs valueRegs = value.jsValueRegs();
3618         GPRReg resultGPR = result.gpr();
3619
3620         JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3621
3622         m_jit.compare8(JITCompiler::Equal,
3623             JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
3624             TrustedImm32(node->queriedType()),
3625             resultGPR);
3626         blessBoolean(resultGPR);
3627         JITCompiler::Jump done = m_jit.jump();
3628
3629         isNotCell.link(&m_jit);
3630         moveFalseTo(resultGPR);
3631
3632         done.link(&m_jit);
3633         blessedBooleanResult(resultGPR, node);
3634         return;
3635     }
3636
3637     case CellUse: {
3638         SpeculateCellOperand cell(this, node->child1());
3639         GPRTemporary result(this, Reuse, cell);
3640
3641         GPRReg cellGPR = cell.gpr();
3642         GPRReg resultGPR = result.gpr();
3643
3644         m_jit.compare8(JITCompiler::Equal,
3645             JITCompiler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
3646             TrustedImm32(node->queriedType()),
3647             resultGPR);
3648         blessBoolean(resultGPR);
3649         blessedBooleanResult(resultGPR, node);
3650         return;
3651     }
3652
3653     default:
3654         RELEASE_ASSERT_NOT_REACHED();
3655         break;
3656     }
3657 }
3658
3659 void SpeculativeJIT::compileIsTypedArrayView(Node* node)
3660 {
3661     JSValueOperand value(this, node->child1());
3662 #if USE(JSVALUE64)
3663     GPRTemporary result(this, Reuse, value);
3664 #else
3665     GPRTemporary result(this, Reuse, value, PayloadWord);
3666 #endif
3667
3668     JSValueRegs valueRegs = value.jsValueRegs();
3669     GPRReg resultGPR = result.gpr();
3670
3671     JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
3672
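    // Typed array types occupy a contiguous range of JSType values, so one unsigned
    // compare of (type - Int8ArrayType) against (Float64ArrayType - Int8ArrayType)
    // tests membership in the whole range.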
3673     m_jit.load8(JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()), resultGPR);
3674     m_jit.sub32(TrustedImm32(Int8ArrayType), resultGPR);
3675     m_jit.compare32(JITCompiler::BelowOrEqual,
3676         resultGPR,
3677         TrustedImm32(Float64ArrayType - Int8ArrayType),
3678         resultGPR);
3679     blessBoolean(resultGPR);
3680     JITCompiler::Jump done = m_jit.jump();
3681
3682     isNotCell.link(&m_jit);
3683     moveFalseTo(resultGPR);
3684
3685     done.link(&m_jit);
3686     blessedBooleanResult(resultGPR, node);
3687 }
3688
3689 void SpeculativeJIT::compileCallObjectConstructor(Node* node)
3690 {
3691     RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
3692     JSValueOperand value(this, node->child1());
3693 #if USE(JSVALUE64)
3694     GPRTemporary result(this, Reuse, value);
3695 #else
3696     GPRTemporary result(this, Reuse, value, PayloadWord);
3697 #endif
3698
3699     JSValueRegs valueRegs = value.jsValueRegs();
3700     GPRReg resultGPR = result.gpr();
3701
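    // Object cells flow through unchanged; non-cells and non-object cells go to
    // operationObjectConstructor, which applies the Object constructor's conversion.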
3702     MacroAssembler::JumpList slowCases;
3703     slowCases.append(m_jit.branchIfNotCell(valueRegs));
3704     slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
3705     m_jit.move(valueRegs.payloadGPR(), resultGPR);
3706
3707     addSlowPathGenerator(slowPathCall(slowCases, this, operationObjectConstructor, resultGPR, m_jit.globalObjectFor(node->origin.semantic), valueRegs));
3708     cellResult(resultGPR, node);
3709 }
3710
3711 void SpeculativeJIT::compileArithAdd(Node* node)
3712 {
3713     switch (node->binaryUseKind()) {
3714     case Int32Use: {
3715         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3716
3717         if (node->child2()->isInt32Constant()) {
3718             SpeculateInt32Operand op1(this, node->child1());
3719             GPRTemporary result(this, Reuse, op1);
3720
3721             GPRReg gpr1 = op1.gpr();
3722             int32_t imm2 = node->child2()->asInt32();
3723             GPRReg gprResult = result.gpr();
3724
3725             if (!shouldCheckOverflow(node->arithMode())) {
3726                 m_jit.add32(Imm32(imm2), gpr1, gprResult);
3727                 int32Result(gprResult, node);
3728                 return;
3729             }
3730
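            // When the checked add overflows, the destination already holds the wrapped
            // sum. If the operand register doubles as the result, attach a
            // SpeculationRecovery so OSR exit can subtract the immediate back out and
            // recover the original operand value.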
3731             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
3732             if (gpr1 == gprResult) {
3733                 speculationCheck(Overflow, JSValueRegs(), 0, check,
3734                     SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
3735             } else
3736                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3737
3738             int32Result(gprResult, node);
3739             return;
3740         }
3741                 
3742         SpeculateInt32Operand op1(this, node->child1());
3743         SpeculateInt32Operand op2(this, node->child2());
3744         GPRTemporary result(this, Reuse, op1, op2);
3745
3746         GPRReg gpr1 = op1.gpr();
3747         GPRReg gpr2 = op2.gpr();
3748         GPRReg gprResult = result.gpr();
3749
3750         if (!shouldCheckOverflow(node->arithMode()))
3751             m_jit.add32(gpr1, gpr2, gprResult);
3752         else {
3753             MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
3754                 
3755             if (gpr1 == gprResult)
3756                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
3757             else if (gpr2 == gprResult)
3758                 speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
3759             else
3760                 speculationCheck(Overflow, JSValueRegs(), 0, check);
3761         }
3762
3763         int32Result(gprResult, node);
3764         return;
3765     }
3766         
3767 #if USE(JSVALUE64)
3768     case Int52RepUse: {
3769         ASSERT(shouldCheckOverflow(node->arithMode()));
3770         ASSERT(!shouldCheckNegativeZero(node->arithMode()));
3771
3772         // Will we need an overflow check? If we can prove that neither input can be
3773         // Int52, then the overflow check will not be necessary.
3774         if (!m_state.forNode(node->child1()).couldBeType(SpecInt52Only)
3775             && !m_state.forNode(node->child2()).couldBeType(SpecInt52Only)) {
3776             SpeculateWhicheverInt52Operand op1(this, node->child1());
3777             SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
3778             GPRTemporary result(this, Reuse, op1);
3779             m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
3780             int52Result(result.gpr(), node, op1.format());
3781             return;
3782         }
3783         
3784         SpeculateInt52Operand op1(this, node->child1());
3785         SpeculateInt52Operand op2(this, node->child2());
3786         GPRTemporary result(this);
3787         m_jit.move(op1.gpr(), result.gpr());
3788         speculationCheck(
3789             Int52Overflow, JSValueRegs(), 0,
3790             m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
3791         int52Result(result.gpr(), node);
3792         return;
3793     }
3794 #endif // USE(JSVALUE64)
3795     
3796     case DoubleRepUse: {
3797         SpeculateDoubleOperand op1(this, node->child1());
3798         SpeculateDoubleOperand op2(this, node->child2());
3799         FPRTemporary result(this, op1, op2);
3800
3801         FPRReg reg1 = op1.fpr();
3802         FPRReg reg2 = op2.fpr();
3803         m_jit.addDouble(reg1, reg2, result.fpr());
3804
3805         doubleResult(result.fpr(), node);
3806         return;
3807     }
3808         
3809     default:
3810         RELEASE_ASSERT_NOT_REACHED();
3811         break;
3812     }
3813 }
3814
3815 void SpeculativeJIT::compileMakeRope(Node* node)
3816 {
3817     ASSERT(node->child1().useKind() == KnownStringUse);
3818     ASSERT(node->child2().useKind() == KnownStringUse);
3819     ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
3820     
3821     SpeculateCellOperand op1(this, node->child1());
3822     SpeculateCellOperand op2(this, node->child2());
3823     SpeculateCellOperand op3(this, node->child3());
3824     GPRTemporary result(this);
3825     GPRTemporary allocator(this);
3826     GPRTemporary scratch(this);
3827     
3828     GPRReg opGPRs[3];
3829     unsigned numOpGPRs;
3830     opGPRs[0] = op1.gpr();
3831     opGPRs[1] = op2.gpr();
3832     if (node->child3()) {
3833         opGPRs[2] = op3.gpr();
3834         numOpGPRs = 3;
3835     } else {
3836         opGPRs[2] = InvalidGPRReg;
3837         numOpGPRs = 2;
3838     }
3839     GPRReg resultGPR = result.gpr();
3840     GPRReg allocatorGPR = allocator.gpr();
3841     GPRReg scratchGPR = scratch.gpr();
3842     
3843     JITCompiler::JumpList slowPath;
3844     MarkedAllocator* markedAllocator = m_jit.vm()->heap.allocatorForObjectWithDestructor(sizeof(JSRopeString));
3845     RELEASE_ASSERT(markedAllocator);
3846     m_jit.move(TrustedImmPtr(markedAllocator), allocatorGPR);
3847     emitAllocateJSCell(resultGPR, markedAllocator, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
3848         
3849     m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
3850     for (unsigned i = 0; i < numOpGPRs; ++i)
3851         m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3852     for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
3853         m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
3854     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
3855     m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
3856     if (!ASSERT_DISABLED) {
3857         JITCompiler::Jump ok = m_jit.branch32(
3858             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3859         m_jit.abortWithReason(DFGNegativeStringLength);
3860         ok.link(&m_jit);
3861     }
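    // Accumulate the rope's flags and length from its fibers: the rope is 8-bit only if
    // every fiber is 8-bit (hence ANDing the flags), and the summed length must not
    // overflow int32 (hence the checked adds).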
3862     for (unsigned i = 1; i < numOpGPRs; ++i) {
3863         m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
3864         speculationCheck(
3865             Uncountable, JSValueSource(), nullptr,
3866             m_jit.branchAdd32(
3867                 JITCompiler::Overflow,
3868                 JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR));
3869     }
3870     m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
3871     m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
3872     if (!ASSERT_DISABLED) {
3873         JITCompiler::Jump ok = m_jit.branch32(
3874             JITCompiler::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
3875         m_jit.abortWithReason(DFGNegativeStringLength);
3876         ok.link(&m_jit);
3877     }
3878     m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
3879     
3880     switch (numOpGPRs) {
3881     case 2:
3882         addSlowPathGenerator(sl