[JSC] Implement optimized WeakMap and WeakSet
WebKit-https.git: Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
1 /*
2  * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27 #include "DFGSpeculativeJIT.h"
28
29 #if ENABLE(DFG_JIT)
30
31 #include "BinarySwitch.h"
32 #include "DFGAbstractInterpreterInlines.h"
33 #include "DFGArrayifySlowPathGenerator.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36 #include "DFGCapabilities.h"
37 #include "DFGMayExit.h"
38 #include "DFGOSRExitFuzz.h"
39 #include "DFGSaneStringGetByValSlowPathGenerator.h"
40 #include "DFGSlowPathGenerator.h"
41 #include "DFGSnippetParams.h"
42 #include "DirectArguments.h"
43 #include "JITAddGenerator.h"
44 #include "JITBitAndGenerator.h"
45 #include "JITBitOrGenerator.h"
46 #include "JITBitXorGenerator.h"
47 #include "JITDivGenerator.h"
48 #include "JITLeftShiftGenerator.h"
49 #include "JITMulGenerator.h"
50 #include "JITRightShiftGenerator.h"
51 #include "JITSubGenerator.h"
52 #include "JSAsyncFunction.h"
53 #include "JSAsyncGeneratorFunction.h"
54 #include "JSCInlines.h"
55 #include "JSFixedArray.h"
56 #include "JSGeneratorFunction.h"
57 #include "JSLexicalEnvironment.h"
58 #include "LinkBuffer.h"
59 #include "RegExpConstructor.h"
60 #include "ScopedArguments.h"
61 #include "ScratchRegisterAllocator.h"
62 #include "SuperSampler.h"
63 #include "WeakMapImpl.h"
64 #include <wtf/BitVector.h>
65 #include <wtf/Box.h>
66 #include <wtf/MathExtras.h>
67
68 namespace JSC { namespace DFG {
69
70 SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
71     : m_compileOkay(true)
72     , m_jit(jit)
73     , m_currentNode(0)
74     , m_lastGeneratedNode(LastNodeType)
75     , m_indexInBlock(0)
76     , m_generationInfo(m_jit.graph().frameRegisterCount())
77     , m_state(m_jit.graph())
78     , m_interpreter(m_jit.graph(), m_state)
79     , m_stream(&jit.jitCode()->variableEventStream)
80     , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
81 {
82 }
83
84 SpeculativeJIT::~SpeculativeJIT()
85 {
86 }
87
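// Inline-allocates a JSFinalObject together with its butterfly (if the structure has indexed
// properties or out-of-line capacity): the storage comes from the JSValue gigacage auxiliary
// space, the cell from the JSFinalObject subspace, and unused vector slots are pre-filled with
// PNaN (double shape) or the empty JSValue (unless the shape is Undecided). If either allocator
// is unavailable or runs out, CallArrayAllocatorSlowPathGenerator falls back to operationNewRawObject.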
88 void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
89 {
90     IndexingType indexingType = structure->indexingType();
91     bool hasIndexingHeader = hasIndexedProperties(indexingType);
92
93     unsigned inlineCapacity = structure->inlineCapacity();
94     unsigned outOfLineCapacity = structure->outOfLineCapacity();
95     
96     GPRTemporary scratch(this);
97     GPRTemporary scratch2(this);
98     GPRReg scratchGPR = scratch.gpr();
99     GPRReg scratch2GPR = scratch2.gpr();
100
101     ASSERT(vectorLength >= numElements);
102     vectorLength = Butterfly::optimalContiguousVectorLength(structure.get(), vectorLength);
103     
104     JITCompiler::JumpList slowCases;
105
106     size_t size = 0;
107     if (hasIndexingHeader)
108         size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
109     size += outOfLineCapacity * sizeof(JSValue);
110
111     m_jit.move(TrustedImmPtr(0), storageGPR);
112     
113     if (size) {
114         if (MarkedAllocator* allocator = m_jit.vm()->jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists)) {
115             m_jit.move(TrustedImmPtr(allocator), scratchGPR);
116             m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);
117             
118             m_jit.addPtr(
119                 TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
120                 storageGPR);
121             
122             if (hasIndexingHeader)
123                 m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
124         } else
125             slowCases.append(m_jit.jump());
126     }
127
128     size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
129     MarkedAllocator* allocatorPtr = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists);
130     if (allocatorPtr) {
131         m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
132         emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
133         m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
134     } else
135         slowCases.append(m_jit.jump());
136
137     // I want a slow path that also loads out the storage pointer, and that's
138     // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
139     // of work for a very small piece of functionality. :-/
140     addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
141         slowCases, this, operationNewRawObject, resultGPR, storageGPR,
142         structure, vectorLength));
143
144     if (numElements < vectorLength && LIKELY(!hasUndecided(structure->indexingType()))) {
145 #if USE(JSVALUE64)
146         if (hasDouble(structure->indexingType()))
147             m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
148         else
149             m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
150         for (unsigned i = numElements; i < vectorLength; ++i)
151             m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
152 #else
153         EncodedValueDescriptor value;
154         if (hasDouble(structure->indexingType()))
155             value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
156         else
157             value.asInt64 = JSValue::encode(JSValue());
158         for (unsigned i = numElements; i < vectorLength; ++i) {
159             m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
160             m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
161         }
162 #endif
163     }
164     
165     if (hasIndexingHeader)
166         m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
167     
168     m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());
169     
170     m_jit.mutatorFence(*m_jit.vm());
171 }
172
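// Materializes the argument count (optionally excluding |this|) into lengthGPR: a compile-time
// constant for a non-varargs inline call frame, otherwise a load from the frame's argument-count slot.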
173 void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
174 {
175     if (inlineCallFrame && !inlineCallFrame->isVarargs())
176         m_jit.move(TrustedImm32(inlineCallFrame->argumentCountIncludingThis - !includeThis), lengthGPR);
177     else {
178         VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
179         m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
180         if (!includeThis)
181             m_jit.sub32(TrustedImm32(1), lengthGPR);
182     }
183 }
184
185 void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
186 {
187     emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
188 }
189
190 void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
191 {
192     if (origin.inlineCallFrame) {
193         if (origin.inlineCallFrame->isClosureCall) {
194             m_jit.loadPtr(
195                 JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
196                 calleeGPR);
197         } else {
198             m_jit.move(
199                 TrustedImmPtr::weakPointer(m_jit.graph(), origin.inlineCallFrame->calleeRecovery.constant().asCell()),
200                 calleeGPR);
201         }
202     } else
203         m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
204 }
205
206 void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
207 {
208     m_jit.addPtr(
209         TrustedImm32(
210             JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
211         GPRInfo::callFrameRegister, startGPR);
212 }
213
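// OSR exit fuzzing: bump the global check counter and return a jump that fires once the
// Options::fireOSRExitFuzzAt() / Options::fireOSRExitFuzzAtOrAfter() threshold is reached.
// The returned jump is unset when fuzzing is disabled for this code block.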
214 MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
215 {
216     if (!Options::useOSRExitFuzz()
217         || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
218         || !doOSRExitFuzzing())
219         return MacroAssembler::Jump();
220     
221     MacroAssembler::Jump result;
222     
223     m_jit.pushToSave(GPRInfo::regT0);
224     m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
225     m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
226     m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
227     unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
228     unsigned at = Options::fireOSRExitFuzzAt();
229     if (at || atOrAfter) {
230         unsigned threshold;
231         MacroAssembler::RelationalCondition condition;
232         if (atOrAfter) {
233             threshold = atOrAfter;
234             condition = MacroAssembler::Below;
235         } else {
236             threshold = at;
237             condition = MacroAssembler::NotEqual;
238         }
239         MacroAssembler::Jump ok = m_jit.branch32(
240             condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
241         m_jit.popToRestore(GPRInfo::regT0);
242         result = m_jit.jump();
243         ok.link(&m_jit);
244     }
245     m_jit.popToRestore(GPRInfo::regT0);
246     
247     return result;
248 }
249
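// The speculationCheck() family records an OSR exit for the current node and wires the given
// jump(s) to it; when OSR exit fuzzing is active, the fuzz jump is appended to the same exit.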
250 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
251 {
252     if (!m_compileOkay)
253         return;
254     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
255     if (fuzzJump.isSet()) {
256         JITCompiler::JumpList jumpsToFail;
257         jumpsToFail.append(fuzzJump);
258         jumpsToFail.append(jumpToFail);
259         m_jit.appendExitInfo(jumpsToFail);
260     } else
261         m_jit.appendExitInfo(jumpToFail);
262     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
263 }
264
265 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
266 {
267     if (!m_compileOkay)
268         return;
269     JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
270     if (fuzzJump.isSet()) {
271         JITCompiler::JumpList myJumpsToFail;
272         myJumpsToFail.append(jumpsToFail);
273         myJumpsToFail.append(fuzzJump);
274         m_jit.appendExitInfo(myJumpsToFail);
275     } else
276         m_jit.appendExitInfo(jumpsToFail);
277     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
278 }
279
280 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
281 {
282     if (!m_compileOkay)
283         return OSRExitJumpPlaceholder();
284     unsigned index = m_jit.jitCode()->osrExit.size();
285     m_jit.appendExitInfo();
286     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
287     return OSRExitJumpPlaceholder(index);
288 }
289
290 OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
291 {
292     return speculationCheck(kind, jsValueSource, nodeUse.node());
293 }
294
295 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
296 {
297     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
298 }
299
300 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
301 {
302     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
303 }
304
305 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
306 {
307     if (!m_compileOkay)
308         return;
309     unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
310     m_jit.appendExitInfo(jumpToFail);
311     m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
312 }
313
314 void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
315 {
316     speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
317 }
318
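// An invalidation point starts out as an empty jump list; the label recorded as
// m_replacementSource is what gets overwritten with a jump to this OSR exit if the code
// block is later invalidated (e.g. by a watchpoint firing).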
319 void SpeculativeJIT::emitInvalidationPoint(Node* node)
320 {
321     if (!m_compileOkay)
322         return;
323     OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
324     m_jit.jitCode()->appendOSRExit(OSRExit(
325         UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
326         this, m_stream->size()));
327     info.m_replacementSource = m_jit.watchpointLabel();
328     ASSERT(info.m_replacementSource.isSet());
329     noResult(node);
330 }
331
332 void SpeculativeJIT::unreachable(Node* node)
333 {
334     m_compileOkay = false;
335     m_jit.abortWithReason(DFGUnreachableNode, node->op());
336 }
337
338 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
339 {
340     if (!m_compileOkay)
341         return;
342     speculationCheck(kind, jsValueRegs, node, m_jit.jump());
343     m_compileOkay = false;
344     if (verboseCompilationEnabled())
345         dataLog("Bailing compilation.\n");
346 }
347
348 void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
349 {
350     terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
351 }
352
353 void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
354 {
355     ASSERT(needsTypeCheck(edge, typesPassedThrough));
356     m_interpreter.filter(edge, typesPassedThrough);
357     speculationCheck(exitKind, source, edge.node(), jumpToFail);
358 }
359
360 RegisterSet SpeculativeJIT::usedRegisters()
361 {
362     RegisterSet result;
363     
364     for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
365         GPRReg gpr = GPRInfo::toRegister(i);
366         if (m_gprs.isInUse(gpr))
367             result.set(gpr);
368     }
369     for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
370         FPRReg fpr = FPRInfo::toRegister(i);
371         if (m_fprs.isInUse(fpr))
372             result.set(fpr);
373     }
374     
375     result.merge(RegisterSet::stubUnavailableRegisters());
376     
377     return result;
378 }
379
380 void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
381 {
382     m_slowPathGenerators.append(WTFMove(slowPathGenerator));
383 }
384
385 void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
386 {
387     m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
388 }
389
390 void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
391 {
392     for (auto& slowPathGenerator : m_slowPathGenerators) {
393         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
394         slowPathGenerator->generate(this);
395     }
396     for (auto& slowPathLambda : m_slowPathLambdas) {
397         Node* currentNode = slowPathLambda.currentNode;
398         m_currentNode = currentNode;
399         m_outOfLineStreamIndex = slowPathLambda.streamIndex;
400         pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
401         slowPathLambda.generator();
402         m_outOfLineStreamIndex = std::nullopt;
403     }
404 }
405
406 void SpeculativeJIT::clearGenerationInfo()
407 {
408     for (unsigned i = 0; i < m_generationInfo.size(); ++i)
409         m_generationInfo[i] = GenerationInfo();
410     m_gprs = RegisterBank<GPRInfo>();
411     m_fprs = RegisterBank<FPRInfo>();
412 }
413
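// Plans how to silently spill (if needed) and later refill a GPR-resident value around a call,
// without recording variable events: constants are rematerialized, everything else is stored to
// and reloaded from its virtual register slot in the format recorded in the GenerationInfo.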
414 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
415 {
416     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
417     Node* node = info.node();
418     DataFormat registerFormat = info.registerFormat();
419     ASSERT(registerFormat != DataFormatNone);
420     ASSERT(registerFormat != DataFormatDouble);
421         
422     SilentSpillAction spillAction;
423     SilentFillAction fillAction;
424         
425     if (!info.needsSpill())
426         spillAction = DoNothingForSpill;
427     else {
428 #if USE(JSVALUE64)
429         ASSERT(info.gpr() == source);
430         if (registerFormat == DataFormatInt32)
431             spillAction = Store32Payload;
432         else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
433             spillAction = StorePtr;
434         else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
435             spillAction = Store64;
436         else {
437             ASSERT(registerFormat & DataFormatJS);
438             spillAction = Store64;
439         }
440 #elif USE(JSVALUE32_64)
441         if (registerFormat & DataFormatJS) {
442             ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
443             spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
444         } else {
445             ASSERT(info.gpr() == source);
446             spillAction = Store32Payload;
447         }
448 #endif
449     }
450         
451     if (registerFormat == DataFormatInt32) {
452         ASSERT(info.gpr() == source);
453         ASSERT(isJSInt32(info.registerFormat()));
454         if (node->hasConstant()) {
455             ASSERT(node->isInt32Constant());
456             fillAction = SetInt32Constant;
457         } else
458             fillAction = Load32Payload;
459     } else if (registerFormat == DataFormatBoolean) {
460 #if USE(JSVALUE64)
461         RELEASE_ASSERT_NOT_REACHED();
462 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
463         fillAction = DoNothingForFill;
464 #endif
465 #elif USE(JSVALUE32_64)
466         ASSERT(info.gpr() == source);
467         if (node->hasConstant()) {
468             ASSERT(node->isBooleanConstant());
469             fillAction = SetBooleanConstant;
470         } else
471             fillAction = Load32Payload;
472 #endif
473     } else if (registerFormat == DataFormatCell) {
474         ASSERT(info.gpr() == source);
475         if (node->hasConstant()) {
476             DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
477             node->asCell(); // To get the assertion.
478             fillAction = SetCellConstant;
479         } else {
480 #if USE(JSVALUE64)
481             fillAction = LoadPtr;
482 #else
483             fillAction = Load32Payload;
484 #endif
485         }
486     } else if (registerFormat == DataFormatStorage) {
487         ASSERT(info.gpr() == source);
488         fillAction = LoadPtr;
489     } else if (registerFormat == DataFormatInt52) {
490         if (node->hasConstant())
491             fillAction = SetInt52Constant;
492         else if (info.spillFormat() == DataFormatInt52)
493             fillAction = Load64;
494         else if (info.spillFormat() == DataFormatStrictInt52)
495             fillAction = Load64ShiftInt52Left;
496         else if (info.spillFormat() == DataFormatNone)
497             fillAction = Load64;
498         else {
499             RELEASE_ASSERT_NOT_REACHED();
500 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
501             fillAction = Load64; // Make GCC happy.
502 #endif
503         }
504     } else if (registerFormat == DataFormatStrictInt52) {
505         if (node->hasConstant())
506             fillAction = SetStrictInt52Constant;
507         else if (info.spillFormat() == DataFormatInt52)
508             fillAction = Load64ShiftInt52Right;
509         else if (info.spillFormat() == DataFormatStrictInt52)
510             fillAction = Load64;
511         else if (info.spillFormat() == DataFormatNone)
512             fillAction = Load64;
513         else {
514             RELEASE_ASSERT_NOT_REACHED();
515 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
516             fillAction = Load64; // Make GCC happy.
517 #endif
518         }
519     } else {
520         ASSERT(registerFormat & DataFormatJS);
521 #if USE(JSVALUE64)
522         ASSERT(info.gpr() == source);
523         if (node->hasConstant()) {
524             if (node->isCellConstant())
525                 fillAction = SetTrustedJSConstant;
526             else
527                 fillAction = SetJSConstant;
528         } else if (info.spillFormat() == DataFormatInt32) {
529             ASSERT(registerFormat == DataFormatJSInt32);
530             fillAction = Load32PayloadBoxInt;
531         } else
532             fillAction = Load64;
533 #else
534         ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
535         if (node->hasConstant())
536             fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
537         else if (info.payloadGPR() == source)
538             fillAction = Load32Payload;
539         else { // Fill the Tag
540             switch (info.spillFormat()) {
541             case DataFormatInt32:
542                 ASSERT(registerFormat == DataFormatJSInt32);
543                 fillAction = SetInt32Tag;
544                 break;
545             case DataFormatCell:
546                 ASSERT(registerFormat == DataFormatJSCell);
547                 fillAction = SetCellTag;
548                 break;
549             case DataFormatBoolean:
550                 ASSERT(registerFormat == DataFormatJSBoolean);
551                 fillAction = SetBooleanTag;
552                 break;
553             default:
554                 fillAction = Load32Tag;
555                 break;
556             }
557         }
558 #endif
559     }
560         
561     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
562 }
563     
564 SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
565 {
566     GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
567     Node* node = info.node();
568     ASSERT(info.registerFormat() == DataFormatDouble);
569
570     SilentSpillAction spillAction;
571     SilentFillAction fillAction;
572         
573     if (!info.needsSpill())
574         spillAction = DoNothingForSpill;
575     else {
576         ASSERT(!node->hasConstant());
577         ASSERT(info.spillFormat() == DataFormatNone);
578         ASSERT(info.fpr() == source);
579         spillAction = StoreDouble;
580     }
581         
582 #if USE(JSVALUE64)
583     if (node->hasConstant()) {
584         node->asNumber(); // To get the assertion.
585         fillAction = SetDoubleConstant;
586     } else {
587         ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
588         fillAction = LoadDouble;
589     }
590 #elif USE(JSVALUE32_64)
591     ASSERT(info.registerFormat() == DataFormatDouble);
592     if (node->hasConstant()) {
593         node->asNumber(); // To get the assertion.
594         fillAction = SetDoubleConstant;
595     } else
596         fillAction = LoadDouble;
597 #endif
598
599     return SilentRegisterSavePlan(spillAction, fillAction, node, source);
600 }
601     
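// silentSpill()/silentFill() execute the two halves of a SilentRegisterSavePlan computed above.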
602 void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
603 {
604     switch (plan.spillAction()) {
605     case DoNothingForSpill:
606         break;
607     case Store32Tag:
608         m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
609         break;
610     case Store32Payload:
611         m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
612         break;
613     case StorePtr:
614         m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
615         break;
616 #if USE(JSVALUE64)
617     case Store64:
618         m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
619         break;
620 #endif
621     case StoreDouble:
622         m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
623         break;
624     default:
625         RELEASE_ASSERT_NOT_REACHED();
626     }
627 }
628     
629 void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan)
630 {
631     switch (plan.fillAction()) {
632     case DoNothingForFill:
633         break;
634     case SetInt32Constant:
635         m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
636         break;
637 #if USE(JSVALUE64)
638     case SetInt52Constant:
639         m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
640         break;
641     case SetStrictInt52Constant:
642         m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
643         break;
644 #endif // USE(JSVALUE64)
645     case SetBooleanConstant:
646         m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
647         break;
648     case SetCellConstant:
649         ASSERT(plan.node()->constant()->value().isCell());
650         m_jit.move(TrustedImmPtr(plan.node()->constant()), plan.gpr());
651         break;
652 #if USE(JSVALUE64)
653     case SetTrustedJSConstant:
654         m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
655         break;
656     case SetJSConstant:
657         m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
658         break;
659     case SetDoubleConstant:
660         m_jit.moveDouble(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), plan.fpr());
661         break;
662     case Load32PayloadBoxInt:
663         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
664         m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
665         break;
666     case Load32PayloadConvertToInt52:
667         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
668         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
669         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
670         break;
671     case Load32PayloadSignExtend:
672         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
673         m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
674         break;
675 #else
676     case SetJSConstantTag:
677         m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
678         break;
679     case SetJSConstantPayload:
680         m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
681         break;
682     case SetInt32Tag:
683         m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
684         break;
685     case SetCellTag:
686         m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
687         break;
688     case SetBooleanTag:
689         m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
690         break;
691     case SetDoubleConstant:
692         m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
693         break;
694 #endif
695     case Load32Tag:
696         m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
697         break;
698     case Load32Payload:
699         m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
700         break;
701     case LoadPtr:
702         m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
703         break;
704 #if USE(JSVALUE64)
705     case Load64:
706         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
707         break;
708     case Load64ShiftInt52Right:
709         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
710         m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
711         break;
712     case Load64ShiftInt52Left:
713         m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
714         m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
715         break;
716 #endif
717     case LoadDouble:
718         m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
719         break;
720     default:
721         RELEASE_ASSERT_NOT_REACHED();
722     }
723 }
724     
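// Given the cell's indexing-type byte in tempGPR, returns the jump(s) taken when it does not
// match the shape the ArrayMode expects. tempGPR is clobbered by the masking and comparison.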
725 JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
726 {
727     switch (arrayMode.arrayClass()) {
728     case Array::OriginalArray: {
729         CRASH();
730 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
731         JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
732         return result;
733 #endif
734     }
735         
736     case Array::Array:
737         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
738         return m_jit.branch32(
739             MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
740         
741     case Array::NonArray:
742     case Array::OriginalNonArray:
743         m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
744         return m_jit.branch32(
745             MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
746         
747     case Array::PossiblyArray:
748         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
749         return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
750     }
751     
752     RELEASE_ASSERT_NOT_REACHED();
753     return JITCompiler::Jump();
754 }
755
756 JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
757 {
758     JITCompiler::JumpList result;
759     
760     switch (arrayMode.type()) {
761     case Array::Int32:
762     case Array::Double:
763     case Array::Contiguous:
764     case Array::Undecided:
765         return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, arrayMode.shapeMask());
766
767     case Array::ArrayStorage:
768     case Array::SlowPutArrayStorage: {
769         ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
770         
771         if (arrayMode.isJSArray()) {
772             if (arrayMode.isSlowPut()) {
773                 result.append(
774                     m_jit.branchTest32(
775                         MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
776                 m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
777                 m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
778                 result.append(
779                     m_jit.branch32(
780                         MacroAssembler::Above, tempGPR,
781                         TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
782                 break;
783             }
784             m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
785             result.append(
786                 m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
787             break;
788         }
789         m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
790         if (arrayMode.isSlowPut()) {
791             m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
792             result.append(
793                 m_jit.branch32(
794                     MacroAssembler::Above, tempGPR,
795                     TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
796             break;
797         }
798         result.append(
799             m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
800         break;
801     }
802     default:
803         CRASH();
804         break;
805     }
806     
807     return result;
808 }
809
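// Emits the indexing-type (or cell-type) check that CheckArray demands, or nothing at all when
// the abstract interpreter has already proven the array mode for child1.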
810 void SpeculativeJIT::checkArray(Node* node)
811 {
812     ASSERT(node->arrayMode().isSpecific());
813     ASSERT(!node->arrayMode().doesConversion());
814     
815     SpeculateCellOperand base(this, node->child1());
816     GPRReg baseReg = base.gpr();
817     
818     if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
819         noResult(m_currentNode);
820         return;
821     }
822     
823     const ClassInfo* expectedClassInfo = 0;
824     
825     switch (node->arrayMode().type()) {
826     case Array::AnyTypedArray:
827     case Array::String:
828         RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
829         break;
830     case Array::Int32:
831     case Array::Double:
832     case Array::Contiguous:
833     case Array::Undecided:
834     case Array::ArrayStorage:
835     case Array::SlowPutArrayStorage: {
836         GPRTemporary temp(this);
837         GPRReg tempGPR = temp.gpr();
838         m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
839         speculationCheck(
840             BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
841             jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
842         
843         noResult(m_currentNode);
844         return;
845     }
846     case Array::DirectArguments:
847         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
848         noResult(m_currentNode);
849         return;
850     case Array::ScopedArguments:
851         speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
852         noResult(m_currentNode);
853         return;
854     default:
855         speculateCellTypeWithoutTypeFiltering(
856             node->child1(), baseReg,
857             typeForTypedArrayType(node->arrayMode().typedArrayType()));
858         noResult(m_currentNode);
859         return;
860     }
861     
862     RELEASE_ASSERT(expectedClassInfo);
863     
864     GPRTemporary temp(this);
865     GPRTemporary temp2(this);
866     m_jit.emitLoadStructure(*m_jit.vm(), baseReg, temp.gpr(), temp2.gpr());
867     speculationCheck(
868         BadType, JSValueSource::unboxedCell(baseReg), node,
869         m_jit.branchPtr(
870             MacroAssembler::NotEqual,
871             MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
872             TrustedImmPtr(PoisonedClassInfoPtr(expectedClassInfo).bits())));
873
874     noResult(m_currentNode);
875 }
876
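// Arrayify/ArrayifyToStructure: if the base does not already have the wanted structure or
// indexing shape, fall into ArrayifySlowPathGenerator, which calls out to the runtime to
// convert the object's storage.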
877 void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
878 {
879     ASSERT(node->arrayMode().doesConversion());
880     
881     GPRTemporary temp(this);
882     GPRTemporary structure;
883     GPRReg tempGPR = temp.gpr();
884     GPRReg structureGPR = InvalidGPRReg;
885     
886     if (node->op() != ArrayifyToStructure) {
887         GPRTemporary realStructure(this);
888         structure.adopt(realStructure);
889         structureGPR = structure.gpr();
890     }
891         
892     // We can skip all that comes next if we already have array storage.
893     MacroAssembler::JumpList slowPath;
894     
895     if (node->op() == ArrayifyToStructure) {
896         slowPath.append(m_jit.branchWeakStructure(
897             JITCompiler::NotEqual,
898             JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
899             node->structure()));
900     } else {
901         m_jit.load8(
902             MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
903         
904         slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
905     }
906     
907     addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
908         slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));
909     
910     noResult(m_currentNode);
911 }
912
913 void SpeculativeJIT::arrayify(Node* node)
914 {
915     ASSERT(node->arrayMode().isSpecific());
916     
917     SpeculateCellOperand base(this, node->child1());
918     
919     if (!node->child2()) {
920         arrayify(node, base.gpr(), InvalidGPRReg);
921         return;
922     }
923     
924     SpeculateInt32Operand property(this, node->child2());
925     
926     arrayify(node, base.gpr(), property.gpr());
927 }
928
929 GPRReg SpeculativeJIT::fillStorage(Edge edge)
930 {
931     VirtualRegister virtualRegister = edge->virtualRegister();
932     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
933     
934     switch (info.registerFormat()) {
935     case DataFormatNone: {
936         if (info.spillFormat() == DataFormatStorage) {
937             GPRReg gpr = allocate();
938             m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
939             m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
940             info.fillStorage(*m_stream, gpr);
941             return gpr;
942         }
943         
944         // Must be a cell; fill it as a cell and then return the pointer.
945         return fillSpeculateCell(edge);
946     }
947         
948     case DataFormatStorage: {
949         GPRReg gpr = info.gpr();
950         m_gprs.lock(gpr);
951         return gpr;
952     }
953         
954     default:
955         return fillSpeculateCell(edge);
956     }
957 }
958
959 void SpeculativeJIT::useChildren(Node* node)
960 {
961     if (node->flags() & NodeHasVarArgs) {
962         for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
963             if (!!m_jit.graph().m_varArgChildren[childIdx])
964                 use(m_jit.graph().m_varArgChildren[childIdx]);
965         }
966     } else {
967         Edge child1 = node->child1();
968         if (!child1) {
969             ASSERT(!node->child2() && !node->child3());
970             return;
971         }
972         use(child1);
973         
974         Edge child2 = node->child2();
975         if (!child2) {
976             ASSERT(!node->child3());
977             return;
978         }
979         use(child2);
980         
981         Edge child3 = node->child3();
982         if (!child3)
983             return;
984         use(child3);
985     }
986 }
987
988 void SpeculativeJIT::compileTryGetById(Node* node)
989 {
990     switch (node->child1().useKind()) {
991     case CellUse: {
992         SpeculateCellOperand base(this, node->child1());
993         JSValueRegsTemporary result(this, Reuse, base);
994
995         JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
996         JSValueRegs resultRegs = result.regs();
997
998         base.use();
999
1000         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);
1001
1002         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1003         break;
1004     }
1005
1006     case UntypedUse: {
1007         JSValueOperand base(this, node->child1());
1008         JSValueRegsTemporary result(this, Reuse, base);
1009
1010         JSValueRegs baseRegs = base.jsValueRegs();
1011         JSValueRegs resultRegs = result.regs();
1012
1013         base.use();
1014
1015         JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
1016
1017         cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);
1018
1019         jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
1020         break;
1021     }
1022
1023     default:
1024         DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1025         break;
1026     } 
1027 }
1028
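// compileIn: when the key is a constant atomic string we plant a patchable jump and a
// StructureStubInfo so the "in" check can be inline-cached; otherwise we flush registers
// and call operationGenericIn.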
1029 void SpeculativeJIT::compileIn(Node* node)
1030 {
1031     SpeculateCellOperand base(this, node->child1());
1032     GPRReg baseGPR = base.gpr();
1033     
1034     if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(*m_jit.vm())) {
1035         if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
1036             StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
1037             
1038             GPRTemporary result(this);
1039             GPRReg resultGPR = result.gpr();
1040
1041             use(node->child2());
1042             
1043             MacroAssembler::PatchableJump jump = m_jit.patchableJump();
1044             MacroAssembler::Label done = m_jit.label();
1045             
1046             // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
1047             // we can cast it to const AtomicStringImpl* safely.
1048             auto slowPath = slowPathCall(
1049                 jump.m_jump, this, operationInOptimize,
1050                 JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
1051                 static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
1052             
1053             stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
1054             stubInfo->codeOrigin = node->origin.semantic;
1055             stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
1056             stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
1057             stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
1058 #if USE(JSVALUE32_64)
1059             stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
1060             stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
1061             stubInfo->patch.thisTagGPR = static_cast<int8_t>(InvalidGPRReg);
1062 #endif
1063             stubInfo->patch.usedRegisters = usedRegisters();
1064
1065             m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
1066             addSlowPathGenerator(WTFMove(slowPath));
1067
1068             base.use();
1069
1070             blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1071             return;
1072         }
1073     }
1074
1075     JSValueOperand key(this, node->child2());
1076     JSValueRegs regs = key.jsValueRegs();
1077         
1078     GPRFlushedCallResult result(this);
1079     GPRReg resultGPR = result.gpr();
1080         
1081     base.use();
1082     key.use();
1083         
1084     flushRegisters();
1085     callOperation(
1086         operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
1087         baseGPR, regs);
1088     m_jit.exceptionCheck();
1089     blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1090 }
1091
1092 void SpeculativeJIT::compileDeleteById(Node* node)
1093 {
1094     JSValueOperand value(this, node->child1());
1095     GPRFlushedCallResult result(this);
1096
1097     JSValueRegs valueRegs = value.jsValueRegs();
1098     GPRReg resultGPR = result.gpr();
1099
1100     value.use();
1101
1102     flushRegisters();
1103     callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
1104     m_jit.exceptionCheck();
1105
1106     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1107 }
1108
1109 void SpeculativeJIT::compileDeleteByVal(Node* node)
1110 {
1111     JSValueOperand base(this, node->child1());
1112     JSValueOperand key(this, node->child2());
1113     GPRFlushedCallResult result(this);
1114
1115     JSValueRegs baseRegs = base.jsValueRegs();
1116     JSValueRegs keyRegs = key.jsValueRegs();
1117     GPRReg resultGPR = result.gpr();
1118
1119     base.use();
1120     key.use();
1121
1122     flushRegisters();
1123     callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
1124     m_jit.exceptionCheck();
1125
1126     unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
1127 }
1128
1129 void SpeculativeJIT::compilePushWithScope(Node* node)
1130 {
1131     SpeculateCellOperand currentScope(this, node->child1());
1132     GPRReg currentScopeGPR = currentScope.gpr();
1133
1134     GPRFlushedCallResult result(this);
1135     GPRReg resultGPR = result.gpr();
1136
1137     auto objectEdge = node->child2();
1138     if (objectEdge.useKind() == ObjectUse) {
1139         SpeculateCellOperand object(this, objectEdge);
1140         GPRReg objectGPR = object.gpr();
1141         speculateObject(objectEdge, objectGPR);
1142
1143         flushRegisters();
1144         callOperation(operationPushWithScopeObject, resultGPR, currentScopeGPR, objectGPR);
1145         // No exception check here as we did not have to call toObject().
1146     } else {
1147         ASSERT(objectEdge.useKind() == UntypedUse);
1148         JSValueOperand object(this, objectEdge);
1149         JSValueRegs objectRegs = object.jsValueRegs();
1150
1151         flushRegisters();
1152         callOperation(operationPushWithScope, resultGPR, currentScopeGPR, objectRegs);
1153         m_jit.exceptionCheck();
1154     }
1155     
1156     cellResult(resultGPR, node);
1157 }
1158
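// If the compare's only remaining use is the Branch node that immediately follows it, fuse the
// two (peephole) and let the caller skip that branch; otherwise materialize a boolean result.
// Returns true when the peephole was taken.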
1159 bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
1160 {
1161     unsigned branchIndexInBlock = detectPeepHoleBranch();
1162     if (branchIndexInBlock != UINT_MAX) {
1163         Node* branchNode = m_block->at(branchIndexInBlock);
1164
1165         ASSERT(node->adjustedRefCount() == 1);
1166         
1167         nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
1168     
1169         m_indexInBlock = branchIndexInBlock;
1170         m_currentNode = branchNode;
1171         
1172         return true;
1173     }
1174     
1175     nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
1176     
1177     return false;
1178 }
1179
1180 bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
1181 {
1182     unsigned branchIndexInBlock = detectPeepHoleBranch();
1183     if (branchIndexInBlock != UINT_MAX) {
1184         Node* branchNode = m_block->at(branchIndexInBlock);
1185
1186         ASSERT(node->adjustedRefCount() == 1);
1187         
1188         nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1189     
1190         m_indexInBlock = branchIndexInBlock;
1191         m_currentNode = branchNode;
1192         
1193         return true;
1194     }
1195     
1196     nonSpeculativeNonPeepholeStrictEq(node, invert);
1197     
1198     return false;
1199 }
1200
1201 static const char* dataFormatString(DataFormat format)
1202 {
1203     // These values correspond to the DataFormat enum.
1204     const char* strings[] = {
1205         "[  ]",
1206         "[ i]",
1207         "[ d]",
1208         "[ c]",
1209         "Err!",
1210         "Err!",
1211         "Err!",
1212         "Err!",
1213         "[J ]",
1214         "[Ji]",
1215         "[Jd]",
1216         "[Jc]",
1217         "Err!",
1218         "Err!",
1219         "Err!",
1220         "Err!",
1221     };
1222     return strings[format];
1223 }
1224
1225 void SpeculativeJIT::dump(const char* label)
1226 {
1227     if (label)
1228         dataLogF("<%s>\n", label);
1229
1230     dataLogF("  gprs:\n");
1231     m_gprs.dump();
1232     dataLogF("  fprs:\n");
1233     m_fprs.dump();
1234     dataLogF("  VirtualRegisters:\n");
1235     for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
1236         GenerationInfo& info = m_generationInfo[i];
1237         if (info.alive())
1238             dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
1239         else
1240             dataLogF("    % 3d:[__][__]", i);
1241         if (info.registerFormat() == DataFormatDouble)
1242             dataLogF(":fpr%d\n", info.fpr());
1243         else if (info.registerFormat() != DataFormatNone
1244 #if USE(JSVALUE32_64)
1245             && !(info.registerFormat() & DataFormatJS)
1246 #endif
1247             ) {
1248             ASSERT(info.gpr() != InvalidGPRReg);
1249             dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
1250         } else
1251             dataLogF("\n");
1252     }
1253     if (label)
1254         dataLogF("</%s>\n", label);
1255 }
1256
1257 GPRTemporary::GPRTemporary()
1258     : m_jit(0)
1259     , m_gpr(InvalidGPRReg)
1260 {
1261 }
1262
1263 GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1264     : m_jit(jit)
1265     , m_gpr(InvalidGPRReg)
1266 {
1267     m_gpr = m_jit->allocate();
1268 }
1269
1270 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1271     : m_jit(jit)
1272     , m_gpr(InvalidGPRReg)
1273 {
1274     m_gpr = m_jit->allocate(specific);
1275 }
1276
1277 #if USE(JSVALUE32_64)
1278 GPRTemporary::GPRTemporary(
1279     SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
1280     : m_jit(jit)
1281     , m_gpr(InvalidGPRReg)
1282 {
1283     if (!op1.isDouble() && m_jit->canReuse(op1.node()))
1284         m_gpr = m_jit->reuse(op1.gpr(which));
1285     else
1286         m_gpr = m_jit->allocate();
1287 }
1288 #endif // USE(JSVALUE32_64)
1289
1290 JSValueRegsTemporary::JSValueRegsTemporary() { }
1291
1292 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
1293 #if USE(JSVALUE64)
1294     : m_gpr(jit)
1295 #else
1296     : m_payloadGPR(jit)
1297     , m_tagGPR(jit)
1298 #endif
1299 {
1300 }
1301
1302 #if USE(JSVALUE64)
1303 template<typename T>
1304 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
1305     : m_gpr(jit, Reuse, operand)
1306 {
1307 }
1308 #else
1309 template<typename T>
1310 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
1311 {
1312     if (resultWord == PayloadWord) {
1313         m_payloadGPR = GPRTemporary(jit, Reuse, operand);
1314         m_tagGPR = GPRTemporary(jit);
1315     } else {
1316         m_payloadGPR = GPRTemporary(jit);
1317         m_tagGPR = GPRTemporary(jit, Reuse, operand);
1318     }
1319 }
1320 #endif
1321
1322 #if USE(JSVALUE64)
1323 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1324 {
1325     m_gpr = GPRTemporary(jit, Reuse, operand);
1326 }
1327 #else
1328 JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
1329 {
1330     if (jit->canReuse(operand.node())) {
1331         m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
1332         m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
1333     } else {
1334         m_payloadGPR = GPRTemporary(jit);
1335         m_tagGPR = GPRTemporary(jit);
1336     }
1337 }
1338 #endif
1339
1340 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1341
1342 JSValueRegs JSValueRegsTemporary::regs()
1343 {
1344 #if USE(JSVALUE64)
1345     return JSValueRegs(m_gpr.gpr());
1346 #else
1347     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1348 #endif
1349 }
1350
1351 void GPRTemporary::adopt(GPRTemporary& other)
1352 {
1353     ASSERT(!m_jit);
1354     ASSERT(m_gpr == InvalidGPRReg);
1355     ASSERT(other.m_jit);
1356     ASSERT(other.m_gpr != InvalidGPRReg);
1357     m_jit = other.m_jit;
1358     m_gpr = other.m_gpr;
1359     other.m_jit = 0;
1360     other.m_gpr = InvalidGPRReg;
1361 }
1362
1363 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1364 {
1365     ASSERT(other.m_jit);
1366     ASSERT(other.m_fpr != InvalidFPRReg);
1367     m_jit = other.m_jit;
1368     m_fpr = other.m_fpr;
1369
1370     other.m_jit = nullptr;
1371 }
1372
1373 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1374     : m_jit(jit)
1375     , m_fpr(InvalidFPRReg)
1376 {
1377     m_fpr = m_jit->fprAllocate();
1378 }
1379
1380 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1381     : m_jit(jit)
1382     , m_fpr(InvalidFPRReg)
1383 {
1384     if (m_jit->canReuse(op1.node()))
1385         m_fpr = m_jit->reuse(op1.fpr());
1386     else
1387         m_fpr = m_jit->fprAllocate();
1388 }
1389
1390 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1391     : m_jit(jit)
1392     , m_fpr(InvalidFPRReg)
1393 {
1394     if (m_jit->canReuse(op1.node()))
1395         m_fpr = m_jit->reuse(op1.fpr());
1396     else if (m_jit->canReuse(op2.node()))
1397         m_fpr = m_jit->reuse(op2.fpr());
1398     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1399         m_fpr = m_jit->reuse(op1.fpr());
1400     else
1401         m_fpr = m_jit->fprAllocate();
1402 }
1403
1404 #if USE(JSVALUE32_64)
1405 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1406     : m_jit(jit)
1407     , m_fpr(InvalidFPRReg)
1408 {
1409     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1410         m_fpr = m_jit->reuse(op1.fpr());
1411     else
1412         m_fpr = m_jit->fprAllocate();
1413 }
1414 #endif
1415
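// The compilePeepHole* helpers emit a fused compare-and-branch. When the taken block is the
// fall-through successor, the condition is inverted and the targets swapped so the common
// case falls through.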
1416 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1417 {
1418     BasicBlock* taken = branchNode->branchData()->taken.block;
1419     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1420
1421     if (taken == nextBlock()) {
1422         condition = MacroAssembler::invert(condition);
1423         std::swap(taken, notTaken);
1424     }
1425
1426     SpeculateDoubleOperand op1(this, node->child1());
1427     SpeculateDoubleOperand op2(this, node->child2());
1428     
1429     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1430     jump(notTaken);
1431 }
1432
1433 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1434 {
1435     BasicBlock* taken = branchNode->branchData()->taken.block;
1436     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1437
1438     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1439     
1440     if (taken == nextBlock()) {
1441         condition = MacroAssembler::NotEqual;
1442         BasicBlock* tmp = taken;
1443         taken = notTaken;
1444         notTaken = tmp;
1445     }
1446
1447     SpeculateCellOperand op1(this, node->child1());
1448     SpeculateCellOperand op2(this, node->child2());
1449     
1450     GPRReg op1GPR = op1.gpr();
1451     GPRReg op2GPR = op2.gpr();
1452     
1453     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1454         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1455             speculationCheck(
1456                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1457         }
1458         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1459             speculationCheck(
1460                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1461         }
1462     } else {
1463         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1464             speculationCheck(
1465                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1466                 m_jit.branchIfNotObject(op1GPR));
1467         }
1468         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1469             m_jit.branchTest8(
1470                 MacroAssembler::NonZero, 
1471                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1472                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1473
1474         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1475             speculationCheck(
1476                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1477                 m_jit.branchIfNotObject(op2GPR));
1478         }
1479         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1480             m_jit.branchTest8(
1481                 MacroAssembler::NonZero, 
1482                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1483                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1484     }
1485
1486     branchPtr(condition, op1GPR, op2GPR, taken);
1487     jump(notTaken);
1488 }
1489
1490 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1491 {
1492     BasicBlock* taken = branchNode->branchData()->taken.block;
1493     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1494
1495     // The branch instruction will branch to the taken block.
1496     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1497     if (taken == nextBlock()) {
1498         condition = JITCompiler::invert(condition);
1499         std::swap(taken, notTaken);
1502     }
1503
1504     if (node->child1()->isInt32Constant()) {
1505         int32_t imm = node->child1()->asInt32();
1506         SpeculateBooleanOperand op2(this, node->child2());
1507         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1508     } else if (node->child2()->isInt32Constant()) {
1509         SpeculateBooleanOperand op1(this, node->child1());
1510         int32_t imm = node->child2()->asInt32();
1511         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1512     } else {
1513         SpeculateBooleanOperand op1(this, node->child1());
1514         SpeculateBooleanOperand op2(this, node->child2());
1515         branch32(condition, op1.gpr(), op2.gpr(), taken);
1516     }
1517
1518     jump(notTaken);
1519 }
1520
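// StringSlice (String.prototype.slice on a speculated string). The inline path covers
// two cases: an empty or inverted range, which yields the shared empty string, and a
// length-one slice of a resolved (non-rope) StringImpl, which is served from the VM's
// single-character string table (code units >= 0x100 take a slow call). Everything
// else, including rope strings, goes to operationStringSubstr. For example,
// "foobar".slice(1, 2) can typically be answered inline with the cached string "o".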
1521 void SpeculativeJIT::compileStringSlice(Node* node)
1522 {
1523     SpeculateCellOperand string(this, node->child1());
1524     GPRTemporary startIndex(this);
1525     GPRTemporary temp(this);
1526     GPRTemporary temp2(this);
1527
1528     GPRReg stringGPR = string.gpr();
1529     GPRReg startIndexGPR = startIndex.gpr();
1530     GPRReg tempGPR = temp.gpr();
1531     GPRReg temp2GPR = temp2.gpr();
1532
1533     speculateString(node->child1(), stringGPR);
1534
1535     {
1536         m_jit.load32(JITCompiler::Address(stringGPR, JSString::offsetOfLength()), temp2GPR);
1537
1538         emitPopulateSliceIndex(node->child2(), temp2GPR, startIndexGPR);
1539         if (node->child3())
1540             emitPopulateSliceIndex(node->child3(), temp2GPR, tempGPR);
1541         else
1542             m_jit.move(temp2GPR, tempGPR);
1543     }
1544
1545     CCallHelpers::JumpList doneCases;
1546     CCallHelpers::JumpList slowCases;
1547
1548     auto nonEmptyCase = m_jit.branch32(MacroAssembler::Below, startIndexGPR, tempGPR);
1549     m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), jsEmptyString(&vm())), tempGPR);
1550     doneCases.append(m_jit.jump());
1551
1552     nonEmptyCase.link(&m_jit);
1553     m_jit.sub32(startIndexGPR, tempGPR); // tempGPR now holds the length of the sliced string.
1554     slowCases.append(m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(1)));
1555
1556     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), temp2GPR);
1557     slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, temp2GPR));
1558
1559     m_jit.loadPtr(MacroAssembler::Address(temp2GPR, StringImpl::dataOffset()), tempGPR);
1560
1561     // Load the character at startIndexGPR into tempGPR
1562     m_jit.zeroExtend32ToPtr(startIndexGPR, startIndexGPR);
1563     auto is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(temp2GPR, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
1564
1565     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesOne, 0), tempGPR);
1566     auto cont8Bit = m_jit.jump();
1567
1568     is16Bit.link(&m_jit);
1569     m_jit.load16(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesTwo, 0), tempGPR);
1570
1571     auto bigCharacter = m_jit.branch32(MacroAssembler::AboveOrEqual, tempGPR, TrustedImm32(0x100));
1572
1573     // 8-bit string values don't need the isASCII check.
1574     cont8Bit.link(&m_jit);
1575
1576     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), tempGPR);
1577     m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), tempGPR);
1578     m_jit.loadPtr(tempGPR, tempGPR);
1579
1580     addSlowPathGenerator(
1581         slowPathCall(
1582             bigCharacter, this, operationSingleCharacterString, tempGPR, tempGPR));
1583
1584     addSlowPathGenerator(
1585         slowPathCall(
1586             slowCases, this, operationStringSubstr, tempGPR, stringGPR, startIndexGPR, tempGPR));
1587
1588     doneCases.link(&m_jit);
1589     cellResult(tempGPR, node);
1590 }
1591
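// ToLowerCase fast path: walk an 8-bit, non-rope string and, as long as every code
// unit is ASCII and none of them is in 'A'..'Z', return the input string unchanged.
// The first rope, 16-bit, non-ASCII, or uppercase character bails out to
// operationToLowerCase; the index reached so far is passed along, presumably so the
// slow path does not need to rediscover where scanning stopped.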
1592 void SpeculativeJIT::compileToLowerCase(Node* node)
1593 {
1594     ASSERT(node->op() == ToLowerCase);
1595     SpeculateCellOperand string(this, node->child1());
1596     GPRTemporary temp(this);
1597     GPRTemporary index(this);
1598     GPRTemporary charReg(this);
1599     GPRTemporary length(this);
1600
1601     GPRReg stringGPR = string.gpr();
1602     GPRReg tempGPR = temp.gpr();
1603     GPRReg indexGPR = index.gpr();
1604     GPRReg charGPR = charReg.gpr();
1605     GPRReg lengthGPR = length.gpr();
1606
1607     speculateString(node->child1(), stringGPR);
1608
1609     CCallHelpers::JumpList slowPath;
1610
1611     m_jit.move(TrustedImmPtr(0), indexGPR);
1612
1613     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1614     slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
1615
1616     slowPath.append(m_jit.branchTest32(
1617         MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1618         MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1619     m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1620     m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1621
1622     auto loopStart = m_jit.label();
1623     auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1624     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1625     slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1626     m_jit.sub32(TrustedImm32('A'), charGPR);
1627     slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1628
1629     m_jit.add32(TrustedImm32(1), indexGPR);
1630     m_jit.jump().linkTo(loopStart, &m_jit);
1631     
1632     slowPath.link(&m_jit);
1633     silentSpillAllRegisters(lengthGPR);
1634     callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1635     silentFillAllRegisters();
1636     m_jit.exceptionCheck();
1637     auto done = m_jit.jump();
1638
1639     loopDone.link(&m_jit);
1640     m_jit.move(stringGPR, lengthGPR);
1641
1642     done.link(&m_jit);
1643     cellResult(lengthGPR, node);
1644 }
1645
1646 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1647 {
1648     BasicBlock* taken = branchNode->branchData()->taken.block;
1649     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1650
1651     // The branch instruction will branch to the taken block.
1652     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1653     if (taken == nextBlock()) {
1654         condition = JITCompiler::invert(condition);
1655         std::swap(taken, notTaken);
1658     }
1659
1660     if (node->child1()->isInt32Constant()) {
1661         int32_t imm = node->child1()->asInt32();
1662         SpeculateInt32Operand op2(this, node->child2());
1663         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1664     } else if (node->child2()->isInt32Constant()) {
1665         SpeculateInt32Operand op1(this, node->child1());
1666         int32_t imm = node->child2()->asInt32();
1667         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1668     } else {
1669         SpeculateInt32Operand op1(this, node->child1());
1670         SpeculateInt32Operand op2(this, node->child2());
1671         branch32(condition, op1.gpr(), op2.gpr(), taken);
1672     }
1673
1674     jump(notTaken);
1675 }
1676
1677 // Returns true if the compare is fused with a subsequent branch.
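// The children's use kinds select the specialized peephole form below; string
// compares deliberately return false so the caller emits an ordinary, non-fused
// comparison instead.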
1678 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1679 {
1680     // Fused compare & branch.
1681     unsigned branchIndexInBlock = detectPeepHoleBranch();
1682     if (branchIndexInBlock != UINT_MAX) {
1683         Node* branchNode = m_block->at(branchIndexInBlock);
1684
1685         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1686         // so there can be no intervening nodes that also reference the compare.
1687         ASSERT(node->adjustedRefCount() == 1);
1688
1689         if (node->isBinaryUseKind(Int32Use))
1690             compilePeepHoleInt32Branch(node, branchNode, condition);
1691 #if USE(JSVALUE64)
1692         else if (node->isBinaryUseKind(Int52RepUse))
1693             compilePeepHoleInt52Branch(node, branchNode, condition);
1694 #endif // USE(JSVALUE64)
1695         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1696             // Use non-peephole comparison, for now.
1697             return false;
1698         } else if (node->isBinaryUseKind(DoubleRepUse))
1699             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1700         else if (node->op() == CompareEq) {
1701             if (node->isBinaryUseKind(BooleanUse))
1702                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1703             else if (node->isBinaryUseKind(SymbolUse))
1704                 compilePeepHoleSymbolEquality(node, branchNode);
1705             else if (node->isBinaryUseKind(ObjectUse))
1706                 compilePeepHoleObjectEquality(node, branchNode);
1707             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1708                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1709             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1710                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1711             else if (!needsTypeCheck(node->child1(), SpecOther))
1712                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1713             else if (!needsTypeCheck(node->child2(), SpecOther))
1714                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1715             else {
1716                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1717                 return true;
1718             }
1719         } else {
1720             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1721             return true;
1722         }
1723
1724         use(node->child1());
1725         use(node->child2());
1726         m_indexInBlock = branchIndexInBlock;
1727         m_currentNode = branchNode;
1728         return true;
1729     }
1730     return false;
1731 }
1732
1733 void SpeculativeJIT::noticeOSRBirth(Node* node)
1734 {
1735     if (!node->hasVirtualRegister())
1736         return;
1737     
1738     VirtualRegister virtualRegister = node->virtualRegister();
1739     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1740     
1741     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1742 }
1743
1744 void SpeculativeJIT::compileMovHint(Node* node)
1745 {
1746     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1747     
1748     Node* child = node->child1().node();
1749     noticeOSRBirth(child);
1750     
1751     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1752 }
1753
1754 void SpeculativeJIT::bail(AbortReason reason)
1755 {
1756     if (verboseCompilationEnabled())
1757         dataLog("Bailing compilation.\n");
1758     m_compileOkay = true;
1759     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1760     clearGenerationInfo();
1761 }
1762
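// Generates code for m_block: record the block head label, plant a breakpoint for
// blocks the CFA proved unreachable, set up catch entrypoints, replay variablesAtHead
// into the variable event stream so OSR exit knows where each live variable lives, and
// then generate each node in order while stepping the abstract interpreter alongside
// so per-node type information stays in sync with the emitted code.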
1763 void SpeculativeJIT::compileCurrentBlock()
1764 {
1765     ASSERT(m_compileOkay);
1766     
1767     if (!m_block)
1768         return;
1769     
1770     ASSERT(m_block->isReachable);
1771     
1772     m_jit.blockHeads()[m_block->index] = m_jit.label();
1773
1774     if (!m_block->intersectionOfCFAHasVisited) {
1775         // Don't generate code for basic blocks that are unreachable according to CFA.
1776         // But to be sure that nobody has generated a jump to this block, drop in a
1777         // breakpoint here.
1778         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1779         return;
1780     }
1781
1782     if (m_block->isCatchEntrypoint) {
1783         m_jit.addPtr(CCallHelpers::TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister,  CCallHelpers::stackPointerRegister);
1784         m_jit.emitSaveCalleeSaves();
1785         m_jit.emitMaterializeTagCheckRegisters();
1786         m_jit.emitPutToCallFrameHeader(m_jit.codeBlock(), CallFrameSlot::codeBlock);
1787     }
1788
1789     m_stream->appendAndLog(VariableEvent::reset());
1790     
1791     m_jit.jitAssertHasValidCallFrame();
1792     m_jit.jitAssertTagsInPlace();
1793     m_jit.jitAssertArgumentCountSane();
1794
1795     m_state.reset();
1796     m_state.beginBasicBlock(m_block);
1797     
1798     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1799         int operand = m_block->variablesAtHead.operandForIndex(i);
1800         Node* node = m_block->variablesAtHead[i];
1801         if (!node)
1802             continue; // No need to record dead SetLocals.
1803         
1804         VariableAccessData* variable = node->variableAccessData();
1805         DataFormat format;
1806         if (!node->refCount())
1807             continue; // No need to record dead SetLocals.
1808         format = dataFormatFor(variable->flushFormat());
1809         m_stream->appendAndLog(
1810             VariableEvent::setLocal(
1811                 VirtualRegister(operand),
1812                 variable->machineLocal(),
1813                 format));
1814     }
1815
1816     m_origin = NodeOrigin();
1817     
1818     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1819         m_currentNode = m_block->at(m_indexInBlock);
1820         
1821         // We may have hit a contradiction that the CFA was aware of but that the JIT
1822         // didn't cause directly.
1823         if (!m_state.isValid()) {
1824             bail(DFGBailedAtTopOfBlock);
1825             return;
1826         }
1827
1828         m_interpreter.startExecuting();
1829         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1830         m_jit.setForNode(m_currentNode);
1831         m_origin = m_currentNode->origin;
1832         if (validationEnabled())
1833             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1834         m_lastGeneratedNode = m_currentNode->op();
1835         
1836         ASSERT(m_currentNode->shouldGenerate());
1837         
1838         if (verboseCompilationEnabled()) {
1839             dataLogF(
1840                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1841                 (int)m_currentNode->index(),
1842                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1843             dataLog("\n");
1844         }
1845
1846         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1847             m_jit.jitReleaseAssertNoException(*m_jit.vm());
1848
1849         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1850
1851         compile(m_currentNode);
1852         
1853         if (belongsInMinifiedGraph(m_currentNode->op()))
1854             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1855         
1856 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1857         m_jit.clearRegisterAllocationOffsets();
1858 #endif
1859         
1860         if (!m_compileOkay) {
1861             bail(DFGBailedAtEndOfNode);
1862             return;
1863         }
1864         
1865         // Make sure that the abstract state is rematerialized for the next node.
1866         m_interpreter.executeEffects(m_indexInBlock);
1867     }
1868     
1869     // Perform the most basic verification that children have been used correctly.
1870     if (!ASSERT_DISABLED) {
1871         for (auto& info : m_generationInfo)
1872             RELEASE_ASSERT(!info.alive());
1873     }
1874 }
1875
1876 // If we are making type predictions about our arguments, then we need to check
1877 // that they are correct on function entry.
1878 void SpeculativeJIT::checkArgumentTypes()
1879 {
1880     ASSERT(!m_currentNode);
1881     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1882
1883     auto& arguments = m_jit.graph().m_rootToArguments.find(m_jit.graph().block(0))->value;
1884     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1885         Node* node = arguments[i];
1886         if (!node) {
1887             // The argument is dead. We don't do any checks for such arguments.
1888             continue;
1889         }
1890         
1891         ASSERT(node->op() == SetArgument);
1892         ASSERT(node->shouldGenerate());
1893
1894         VariableAccessData* variableAccessData = node->variableAccessData();
1895         FlushFormat format = variableAccessData->flushFormat();
1896         
1897         if (format == FlushedJSValue)
1898             continue;
1899         
1900         VirtualRegister virtualRegister = variableAccessData->local();
1901
1902         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1903         
1904 #if USE(JSVALUE64)
1905         switch (format) {
1906         case FlushedInt32: {
1907             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1908             break;
1909         }
1910         case FlushedBoolean: {
1911             GPRTemporary temp(this);
1912             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1913             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1914             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1915             break;
1916         }
1917         case FlushedCell: {
1918             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1919             break;
1920         }
1921         default:
1922             RELEASE_ASSERT_NOT_REACHED();
1923             break;
1924         }
1925 #else
1926         switch (format) {
1927         case FlushedInt32: {
1928             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1929             break;
1930         }
1931         case FlushedBoolean: {
1932             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1933             break;
1934         }
1935         case FlushedCell: {
1936             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1937             break;
1938         }
1939         default:
1940             RELEASE_ASSERT_NOT_REACHED();
1941             break;
1942         }
1943 #endif
1944     }
1945
1946     m_origin = NodeOrigin();
1947 }
1948
1949 bool SpeculativeJIT::compile()
1950 {
1951     checkArgumentTypes();
1952     
1953     ASSERT(!m_currentNode);
1954     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1955         m_jit.setForBlockIndex(blockIndex);
1956         m_block = m_jit.graph().block(blockIndex);
1957         compileCurrentBlock();
1958     }
1959     linkBranches();
1960     return true;
1961 }
1962
1963 void SpeculativeJIT::createOSREntries()
1964 {
1965     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1966         BasicBlock* block = m_jit.graph().block(blockIndex);
1967         if (!block)
1968             continue;
1969         if (block->isOSRTarget || block->isCatchEntrypoint) {
1970             // Currently we don't have OSR entry trampolines. We could add them
1971             // here if need be.
1972             m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1973         }
1974     }
1975 }
1976
1977 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1978 {
1979     unsigned osrEntryIndex = 0;
1980     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1981         BasicBlock* block = m_jit.graph().block(blockIndex);
1982         if (!block)
1983             continue;
1984         if (!block->isOSRTarget && !block->isCatchEntrypoint)
1985             continue;
1986         if (block->isCatchEntrypoint) {
1987             auto& argumentsVector = m_jit.graph().m_rootToArguments.find(block)->value;
1988             Vector<FlushFormat> argumentFormats;
1989             argumentFormats.reserveInitialCapacity(argumentsVector.size());
1990             for (Node* setArgument : argumentsVector) {
1991                 if (setArgument) {
1992                     FlushFormat flushFormat = setArgument->variableAccessData()->flushFormat();
1993                     ASSERT(flushFormat == FlushedInt32 || flushFormat == FlushedCell || flushFormat == FlushedBoolean || flushFormat == FlushedJSValue);
1994                     argumentFormats.uncheckedAppend(flushFormat);
1995                 } else
1996                     argumentFormats.uncheckedAppend(DeadFlush);
1997             }
1998             m_jit.noticeCatchEntrypoint(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer, WTFMove(argumentFormats));
1999         } else {
2000             ASSERT(block->isOSRTarget);
2001             m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
2002         }
2003     }
2004
2005     m_jit.jitCode()->finalizeOSREntrypoints();
2006     m_jit.jitCode()->common.finalizeCatchEntrypoints();
2007
2008     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
2009     
2010     if (verboseCompilationEnabled()) {
2011         DumpContext dumpContext;
2012         dataLog("OSR Entries:\n");
2013         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
2014             dataLog("    ", inContext(entryData, &dumpContext), "\n");
2015         if (!dumpContext.isEmpty())
2016             dumpContext.dump(WTF::dataFile());
2017     }
2018 }
2019     
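// CheckTraps with polling traps enabled: test the VM's needTrapHandling flag and, when
// it is set, call operationHandleTraps from a slow path. The "unused" temporary appears
// to exist only to give the slow-path call a result register; its value is never read.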
2020 void SpeculativeJIT::compileCheckTraps(Node*)
2021 {
2022     ASSERT(Options::usePollingTraps());
2023     GPRTemporary unused(this);
2024     GPRReg unusedGPR = unused.gpr();
2025
2026     JITCompiler::Jump needTrapHandling = m_jit.branchTest8(JITCompiler::NonZero,
2027         JITCompiler::AbsoluteAddress(m_jit.vm()->needTrapHandlingAddress()));
2028
2029     addSlowPathGenerator(slowPathCall(needTrapHandling, this, operationHandleTraps, unusedGPR));
2030 }
2031
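// PutByVal into double (ArrayWithDouble) storage. The value is type-checked against
// SpecFullRealNumber with a self-compare, so NaN never reaches the butterfly. In-bounds
// array modes just bounds-check against the public length; otherwise the store may grow
// the public length up to the vector length, and anything beyond that is handed to the
// strict or non-strict operationPutDoubleByValBeyondArrayBounds slow path.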
2032 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
2033 {
2034     Edge child3 = m_jit.graph().varArgChild(node, 2);
2035     Edge child4 = m_jit.graph().varArgChild(node, 3);
2036
2037     ArrayMode arrayMode = node->arrayMode();
2038     
2039     GPRReg baseReg = base.gpr();
2040     GPRReg propertyReg = property.gpr();
2041     
2042     SpeculateDoubleOperand value(this, child3);
2043
2044     FPRReg valueReg = value.fpr();
2045     
2046     DFG_TYPE_CHECK(
2047         JSValueRegs(), child3, SpecFullRealNumber,
2048         m_jit.branchDouble(
2049             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
2050     
2051     if (!m_compileOkay)
2052         return;
2053     
2054     StorageOperand storage(this, child4);
2055     GPRReg storageReg = storage.gpr();
2056
2057     if (node->op() == PutByValAlias) {
2058         // Store the value to the array.
2059         GPRReg propertyReg = property.gpr();
2060         FPRReg valueReg = value.fpr();
2061         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2062         
2063         noResult(m_currentNode);
2064         return;
2065     }
2066     
2067     GPRTemporary temporary;
2068     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
2069
2070     MacroAssembler::Jump slowCase;
2071     
2072     if (arrayMode.isInBounds()) {
2073         speculationCheck(
2074             OutOfBounds, JSValueRegs(), 0,
2075             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2076     } else {
2077         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2078         
2079         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
2080         
2081         if (!arrayMode.isOutOfBounds())
2082             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
2083         
2084         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
2085         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2086         
2087         inBounds.link(&m_jit);
2088     }
2089     
2090     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2091
2092     base.use();
2093     property.use();
2094     value.use();
2095     storage.use();
2096     
2097     if (arrayMode.isOutOfBounds()) {
2098         addSlowPathGenerator(
2099             slowPathCall(
2100                 slowCase, this,
2101                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
2102                 NoResult, baseReg, propertyReg, valueReg));
2103     }
2104
2105     noResult(m_currentNode, UseChildrenCalledExplicitly);
2106 }
2107
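// GetCharCodeAt (the charCodeAt fast path) with a known int32 index: speculate that the
// index is within the string's length (the unsigned compare also filters out negative
// indices), then read an 8-bit or 16-bit code unit depending on the StringImpl's is8Bit
// flag and return it as an int32. For example, "abc".charCodeAt(1) yields 98.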
2108 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
2109 {
2110     SpeculateCellOperand string(this, node->child1());
2111     SpeculateStrictInt32Operand index(this, node->child2());
2112     StorageOperand storage(this, node->child3());
2113
2114     GPRReg stringReg = string.gpr();
2115     GPRReg indexReg = index.gpr();
2116     GPRReg storageReg = storage.gpr();
2117     
2118     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
2119
2120     // unsigned comparison so we can filter out negative indices and indices that are too large
2121     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2122
2123     GPRTemporary scratch(this);
2124     GPRReg scratchReg = scratch.gpr();
2125
2126     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2127
2128     // Load the character into scratchReg
2129     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2130
2131     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2132     JITCompiler::Jump cont8Bit = m_jit.jump();
2133
2134     is16Bit.link(&m_jit);
2135
2136     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2137
2138     cont8Bit.link(&m_jit);
2139
2140     int32Result(scratchReg, m_currentNode);
2141 }
2142
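// GetByVal on a string: like GetCharCodeAt above, but the result is a JSString taken
// from the VM's single-character string table (code units >= 0x100 go through
// operationSingleCharacterString). When the array mode permits out-of-bounds access,
// the handling depends on whether the String/Object prototype chain is watchably sane:
// if so, an out-of-bounds read can simply produce undefined via the sane-chain slow
// path; otherwise it falls back to the generic operationGetByValStringInt.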
2143 void SpeculativeJIT::compileGetByValOnString(Node* node)
2144 {
2145     SpeculateCellOperand base(this, node->child1());
2146     SpeculateStrictInt32Operand property(this, node->child2());
2147     StorageOperand storage(this, node->child3());
2148     GPRReg baseReg = base.gpr();
2149     GPRReg propertyReg = property.gpr();
2150     GPRReg storageReg = storage.gpr();
2151
2152     GPRTemporary scratch(this);
2153     GPRReg scratchReg = scratch.gpr();
2154 #if USE(JSVALUE32_64)
2155     GPRTemporary resultTag;
2156     GPRReg resultTagReg = InvalidGPRReg;
2157     if (node->arrayMode().isOutOfBounds()) {
2158         GPRTemporary realResultTag(this);
2159         resultTag.adopt(realResultTag);
2160         resultTagReg = resultTag.gpr();
2161     }
2162 #endif
2163
2164     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2165
2166     // unsigned comparison so we can filter out negative indices and indices that are too large
2167     JITCompiler::Jump outOfBounds = m_jit.branch32(
2168         MacroAssembler::AboveOrEqual, propertyReg,
2169         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2170     if (node->arrayMode().isInBounds())
2171         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2172
2173     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2174
2175     // Load the character into scratchReg
2176     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2177
2178     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2179     JITCompiler::Jump cont8Bit = m_jit.jump();
2180
2181     is16Bit.link(&m_jit);
2182
2183     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2184
2185     JITCompiler::Jump bigCharacter =
2186         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2187
2188     // 8-bit string values don't need the isASCII check.
2189     cont8Bit.link(&m_jit);
2190
2191     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2192     m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2193     m_jit.loadPtr(scratchReg, scratchReg);
2194
2195     addSlowPathGenerator(
2196         slowPathCall(
2197             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2198
2199     if (node->arrayMode().isOutOfBounds()) {
2200 #if USE(JSVALUE32_64)
2201         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2202 #endif
2203
2204         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2205         bool prototypeChainIsSane = false;
2206         if (globalObject->stringPrototypeChainIsSane()) {
2207             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2208             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2209             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2210             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2211             // indexed properties either.
2212             // https://bugs.webkit.org/show_bug.cgi?id=144668
2213             m_jit.graph().registerAndWatchStructureTransition(globalObject->stringPrototype()->structure());
2214             m_jit.graph().registerAndWatchStructureTransition(globalObject->objectPrototype()->structure());
2215             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2216         }
2217         if (prototypeChainIsSane) {
2218 #if USE(JSVALUE64)
2219             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2220                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2221 #else
2222             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2223                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2224                 baseReg, propertyReg));
2225 #endif
2226         } else {
2227 #if USE(JSVALUE64)
2228             addSlowPathGenerator(
2229                 slowPathCall(
2230                     outOfBounds, this, operationGetByValStringInt,
2231                     scratchReg, baseReg, propertyReg));
2232 #else
2233             addSlowPathGenerator(
2234                 slowPathCall(
2235                     outOfBounds, this, operationGetByValStringInt,
2236                     JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2237 #endif
2238         }
2239         
2240 #if USE(JSVALUE64)
2241         jsValueResult(scratchReg, m_currentNode);
2242 #else
2243         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2244 #endif
2245     } else
2246         cellResult(scratchReg, m_currentNode);
2247 }
2248
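// StringFromCharCode with a single argument. An untyped operand is delegated entirely
// to operationStringFromCharCodeUntyped. For an int32 operand, codes below 0xff are
// looked up in the single-character string table; a missing table entry or a larger
// code falls back to operationStringFromCharCode.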
2249 void SpeculativeJIT::compileFromCharCode(Node* node)
2250 {
2251     Edge& child = node->child1();
2252     if (child.useKind() == UntypedUse) {
2253         JSValueOperand opr(this, child);
2254         JSValueRegs oprRegs = opr.jsValueRegs();
2255 #if USE(JSVALUE64)
2256         GPRTemporary result(this);
2257         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2258 #else
2259         GPRTemporary resultTag(this);
2260         GPRTemporary resultPayload(this);
2261         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2262 #endif
2263         flushRegisters();
2264         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2265         m_jit.exceptionCheck();
2266         
2267         jsValueResult(resultRegs, node);
2268         return;
2269     }
2270
2271     SpeculateStrictInt32Operand property(this, child);
2272     GPRReg propertyReg = property.gpr();
2273     GPRTemporary smallStrings(this);
2274     GPRTemporary scratch(this);
2275     GPRReg scratchReg = scratch.gpr();
2276     GPRReg smallStringsReg = smallStrings.gpr();
2277
2278     JITCompiler::JumpList slowCases;
2279     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2280     m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2281     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2282
2283     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2284     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2285     cellResult(scratchReg, m_currentNode);
2286 }
2287
2288 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2289 {
2290     VirtualRegister virtualRegister = node->virtualRegister();
2291     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2292
2293     switch (info.registerFormat()) {
2294     case DataFormatStorage:
2295         RELEASE_ASSERT_NOT_REACHED();
2296
2297     case DataFormatBoolean:
2298     case DataFormatCell:
2299         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2300         return GeneratedOperandTypeUnknown;
2301
2302     case DataFormatNone:
2303     case DataFormatJSCell:
2304     case DataFormatJS:
2305     case DataFormatJSBoolean:
2306     case DataFormatJSDouble:
2307         return GeneratedOperandJSValue;
2308
2309     case DataFormatJSInt32:
2310     case DataFormatInt32:
2311         return GeneratedOperandInteger;
2312
2313     default:
2314         RELEASE_ASSERT_NOT_REACHED();
2315         return GeneratedOperandTypeUnknown;
2316     }
2317 }
2318
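// ValueToInt32: JavaScript's ToInt32 specialized by use kind. Int52 and already-int32
// inputs are simply truncated; unboxed doubles go through branchTruncateDoubleToInt32
// with operationToInt32 as the slow path; boxed values first check for an immediate
// int32, and with NotCellUse booleans become 0 or 1 while other non-cell, non-number
// values become 0, with any remaining double handed to operationToInt32.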
2319 void SpeculativeJIT::compileValueToInt32(Node* node)
2320 {
2321     switch (node->child1().useKind()) {
2322 #if USE(JSVALUE64)
2323     case Int52RepUse: {
2324         SpeculateStrictInt52Operand op1(this, node->child1());
2325         GPRTemporary result(this, Reuse, op1);
2326         GPRReg op1GPR = op1.gpr();
2327         GPRReg resultGPR = result.gpr();
2328         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2329         int32Result(resultGPR, node, DataFormatInt32);
2330         return;
2331     }
2332 #endif // USE(JSVALUE64)
2333         
2334     case DoubleRepUse: {
2335         GPRTemporary result(this);
2336         SpeculateDoubleOperand op1(this, node->child1());
2337         FPRReg fpr = op1.fpr();
2338         GPRReg gpr = result.gpr();
2339         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2340         
2341         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this,
2342             hasSensibleDoubleToInt() ? operationToInt32SensibleSlow : operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2343         
2344         int32Result(gpr, node);
2345         return;
2346     }
2347     
2348     case NumberUse:
2349     case NotCellUse: {
2350         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2351         case GeneratedOperandInteger: {
2352             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2353             GPRTemporary result(this, Reuse, op1);
2354             m_jit.move(op1.gpr(), result.gpr());
2355             int32Result(result.gpr(), node, op1.format());
2356             return;
2357         }
2358         case GeneratedOperandJSValue: {
2359             GPRTemporary result(this);
2360 #if USE(JSVALUE64)
2361             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2362
2363             GPRReg gpr = op1.gpr();
2364             GPRReg resultGpr = result.gpr();
2365             FPRTemporary tempFpr(this);
2366             FPRReg fpr = tempFpr.fpr();
2367
2368             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2369             JITCompiler::JumpList converted;
2370
2371             if (node->child1().useKind() == NumberUse) {
2372                 DFG_TYPE_CHECK(
2373                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2374                     m_jit.branchTest64(
2375                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2376             } else {
2377                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2378                 
2379                 DFG_TYPE_CHECK(
2380                     JSValueRegs(gpr), node->child1(), ~SpecCellCheck, m_jit.branchIfCell(JSValueRegs(gpr)));
2381                 
2382                 // It's not a cell: so true turns into 1 and all else turns into 0.
2383                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2384                 converted.append(m_jit.jump());
2385                 
2386                 isNumber.link(&m_jit);
2387             }
2388
2389             // If we get here, we have a double encoded as a JSValue.
2390             unboxDouble(gpr, resultGpr, fpr);
2391
2392             silentSpillAllRegisters(resultGpr);
2393             callOperation(operationToInt32, resultGpr, fpr);
2394             silentFillAllRegisters();
2395
2396             converted.append(m_jit.jump());
2397
2398             isInteger.link(&m_jit);
2399             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2400
2401             converted.link(&m_jit);
2402 #else
2403             Node* childNode = node->child1().node();
2404             VirtualRegister virtualRegister = childNode->virtualRegister();
2405             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2406
2407             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2408
2409             GPRReg payloadGPR = op1.payloadGPR();
2410             GPRReg resultGpr = result.gpr();
2411         
2412             JITCompiler::JumpList converted;
2413
2414             if (info.registerFormat() == DataFormatJSInt32)
2415                 m_jit.move(payloadGPR, resultGpr);
2416             else {
2417                 GPRReg tagGPR = op1.tagGPR();
2418                 FPRTemporary tempFpr(this);
2419                 FPRReg fpr = tempFpr.fpr();
2420                 FPRTemporary scratch(this);
2421
2422                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2423
2424                 if (node->child1().useKind() == NumberUse) {
2425                     DFG_TYPE_CHECK(
2426                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2427                         m_jit.branch32(
2428                             MacroAssembler::AboveOrEqual, tagGPR,
2429                             TrustedImm32(JSValue::LowestTag)));
2430                 } else {
2431                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2432                     
2433                     DFG_TYPE_CHECK(
2434                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2435                         m_jit.branchIfCell(op1.jsValueRegs()));
2436                     
2437                     // It's not a cell: so true turns into 1 and all else turns into 0.
2438                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2439                     m_jit.move(TrustedImm32(0), resultGpr);
2440                     converted.append(m_jit.jump());
2441                     
2442                     isBoolean.link(&m_jit);
2443                     m_jit.move(payloadGPR, resultGpr);
2444                     converted.append(m_jit.jump());
2445                     
2446                     isNumber.link(&m_jit);
2447                 }
2448
2449                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2450
2451                 silentSpillAllRegisters(resultGpr);
2452                 callOperation(operationToInt32, resultGpr, fpr);
2453                 silentFillAllRegisters();
2454
2455                 converted.append(m_jit.jump());
2456
2457                 isInteger.link(&m_jit);
2458                 m_jit.move(payloadGPR, resultGpr);
2459
2460                 converted.link(&m_jit);
2461             }
2462 #endif
2463             int32Result(resultGpr, node);
2464             return;
2465         }
2466         case GeneratedOperandTypeUnknown:
2467             RELEASE_ASSERT(!m_compileOkay);
2468             return;
2469         }
2470         RELEASE_ASSERT_NOT_REACHED();
2471         return;
2472     }
2473     
2474     default:
2475         ASSERT(!m_compileOkay);
2476         return;
2477     }
2478 }
2479
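// UInt32ToNumber: the incoming int32 actually holds an unsigned value. When overflow is
// permitted, the result is either widened to Int52 (when Int52 is available) or
// converted to double, adding 2^32 whenever the sign bit was set so the value stays
// non-negative. Otherwise the node speculates that the value is non-negative and exits
// on negative inputs.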
2480 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2481 {
2482     if (doesOverflow(node->arithMode())) {
2483         if (enableInt52()) {
2484             SpeculateInt32Operand op1(this, node->child1());
2485             GPRTemporary result(this, Reuse, op1);
2486             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2487             strictInt52Result(result.gpr(), node);
2488             return;
2489         }
2490         SpeculateInt32Operand op1(this, node->child1());
2491         FPRTemporary result(this);
2492             
2493         GPRReg inputGPR = op1.gpr();
2494         FPRReg outputFPR = result.fpr();
2495             
2496         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2497             
2498         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2499         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2500         positive.link(&m_jit);
2501             
2502         doubleResult(outputFPR, node);
2503         return;
2504     }
2505     
2506     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2507
2508     SpeculateInt32Operand op1(this, node->child1());
2509     GPRTemporary result(this);
2510
2511     m_jit.move(op1.gpr(), result.gpr());
2512
2513     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2514
2515     int32Result(result.gpr(), node, op1.format());
2516 }
2517
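// DoubleAsInt32: converts a double that is speculated to be exactly representable as an
// int32. Conversion failures, including negative zero when the arith mode asks for that
// check, trigger an Overflow exit.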
2518 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2519 {
2520     SpeculateDoubleOperand op1(this, node->child1());
2521     FPRTemporary scratch(this);
2522     GPRTemporary result(this);
2523     
2524     FPRReg valueFPR = op1.fpr();
2525     FPRReg scratchFPR = scratch.fpr();
2526     GPRReg resultGPR = result.gpr();
2527
2528     JITCompiler::JumpList failureCases;
2529     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2530     m_jit.branchConvertDoubleToInt32(
2531         valueFPR, resultGPR, failureCases, scratchFPR,
2532         shouldCheckNegativeZero(node->arithMode()));
2533     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2534
2535     int32Result(resultGPR, node);
2536 }
2537
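// DoubleRep: converts the child to an unboxed double according to its use kind.
// RealNumberUse and NumberUse unbox a boxed double or convert a boxed int32; NotCellUse
// additionally maps null and false to 0, true to 1, and undefined to NaN; Int52RepUse
// simply converts the 52-bit integer to a double.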
2538 void SpeculativeJIT::compileDoubleRep(Node* node)
2539 {
2540     switch (node->child1().useKind()) {
2541     case RealNumberUse: {
2542         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2543         FPRTemporary result(this);
2544         
2545         JSValueRegs op1Regs = op1.jsValueRegs();
2546         FPRReg resultFPR = result.fpr();
2547         
2548 #if USE(JSVALUE64)
2549         GPRTemporary temp(this);
2550         GPRReg tempGPR = temp.gpr();
2551         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2552 #else
2553         FPRTemporary temp(this);
2554         FPRReg tempFPR = temp.fpr();
2555         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2556 #endif
2557         
2558         JITCompiler::Jump done = m_jit.branchDouble(
2559             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2560         
2561         DFG_TYPE_CHECK(
2562             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2563         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2564         
2565         done.link(&m_jit);
2566         
2567         doubleResult(resultFPR, node);
2568         return;
2569     }
2570     
2571     case NotCellUse:
2572     case NumberUse: {
2573         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2574
2575         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2576         if (isInt32Speculation(possibleTypes)) {
2577             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2578             FPRTemporary result(this);
2579             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2580             doubleResult(result.fpr(), node);
2581             return;
2582         }
2583
2584         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2585         FPRTemporary result(this);
2586
2587 #if USE(JSVALUE64)
2588         GPRTemporary temp(this);
2589
2590         GPRReg op1GPR = op1.gpr();
2591         GPRReg tempGPR = temp.gpr();
2592         FPRReg resultFPR = result.fpr();
2593         JITCompiler::JumpList done;
2594
2595         JITCompiler::Jump isInteger = m_jit.branch64(
2596             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2597
2598         if (node->child1().useKind() == NotCellUse) {
2599             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2600             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2601
2602             static const double zero = 0;
2603             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2604
2605             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2606             done.append(isNull);
2607
2608             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCellCheck,
2609                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2610
2611             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2612             static const double one = 1;
2613             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2614             done.append(m_jit.jump());
2615             done.append(isFalse);
2616
2617             isUndefined.link(&m_jit);
2618             static const double NaN = PNaN;
2619             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2620             done.append(m_jit.jump());
2621
2622             isNumber.link(&m_jit);
2623         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2624             typeCheck(
2625                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2626                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2627         }
2628
2629         unboxDouble(op1GPR, tempGPR, resultFPR);
2630         done.append(m_jit.jump());
2631     
2632         isInteger.link(&m_jit);
2633         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2634         done.link(&m_jit);
2635 #else // USE(JSVALUE64) -> this is the 32_64 case
2636         FPRTemporary temp(this);
2637     
2638         GPRReg op1TagGPR = op1.tagGPR();
2639         GPRReg op1PayloadGPR = op1.payloadGPR();
2640         FPRReg tempFPR = temp.fpr();
2641         FPRReg resultFPR = result.fpr();
2642         JITCompiler::JumpList done;
2643     
2644         JITCompiler::Jump isInteger = m_jit.branch32(
2645             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2646
2647         if (node->child1().useKind() == NotCellUse) {
2648             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2649             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2650
2651             static const double zero = 0;
2652             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2653
2654             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2655             done.append(isNull);
2656
2657             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2658
2659             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2660             static const double one = 1;
2661             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2662             done.append(m_jit.jump());
2663             done.append(isFalse);
2664
2665             isUndefined.link(&m_jit);
2666             static const double NaN = PNaN;
2667             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2668             done.append(m_jit.jump());
2669
2670             isNumber.link(&m_jit);
2671         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2672             typeCheck(
2673                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2674                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2675         }
2676
2677         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2678         done.append(m_jit.jump());
2679     
2680         isInteger.link(&m_jit);
2681         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2682         done.link(&m_jit);
2683 #endif // USE(JSVALUE64)
2684     
2685         doubleResult(resultFPR, node);
2686         return;
2687     }
2688         
2689 #if USE(JSVALUE64)
2690     case Int52RepUse: {
2691         SpeculateStrictInt52Operand value(this, node->child1());
2692         FPRTemporary result(this);
2693         
2694         GPRReg valueGPR = value.gpr();
2695         FPRReg resultFPR = result.fpr();
2696
2697         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2698         
2699         doubleResult(resultFPR, node);
2700         return;
2701     }
2702 #endif // USE(JSVALUE64)
2703         
2704     default:
2705         RELEASE_ASSERT_NOT_REACHED();
2706         return;
2707     }
2708 }
2709
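// ValueRep: boxes a DoubleRep back into a JSValue, purifying impure NaN first when the
// input may hold one (see the comment below), or boxes an Int52Rep.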
2710 void SpeculativeJIT::compileValueRep(Node* node)
2711 {
2712     switch (node->child1().useKind()) {
2713     case DoubleRepUse: {
2714         SpeculateDoubleOperand value(this, node->child1());
2715         JSValueRegsTemporary result(this);
2716         
2717         FPRReg valueFPR = value.fpr();
2718         JSValueRegs resultRegs = result.regs();
2719         
2720         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2721         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2722         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2723         // local was purified.
2724         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2725             m_jit.purifyNaN(valueFPR);
2726
2727         boxDouble(valueFPR, resultRegs);
2728         
2729         jsValueResult(resultRegs, node);
2730         return;
2731     }
2732         
2733 #if USE(JSVALUE64)
2734     case Int52RepUse: {
2735         SpeculateStrictInt52Operand value(this, node->child1());
2736         GPRTemporary result(this);
2737         
2738         GPRReg valueGPR = value.gpr();
2739         GPRReg resultGPR = result.gpr();
2740         
2741         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2742         
2743         jsValueResult(resultGPR, node);
2744         return;
2745     }
2746 #endif // USE(JSVALUE64)
2747         
2748     default:
2749         RELEASE_ASSERT_NOT_REACHED();
2750         return;
2751     }
2752 }
2753
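// Helpers for clamping a value into the 0..255 byte range, presumably for
// Uint8ClampedArray-style stores elsewhere in this file: one variant clamps an int32
// already held in a GPR, the other clamps a double, treating NaN as 0 by way of the
// unordered compare against zero.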
2754 static double clampDoubleToByte(double d)
2755 {
2756     d += 0.5;
2757     if (!(d > 0))
2758         d = 0;
2759     else if (d > 255)
2760         d = 255;
2761     return d;
2762 }
2763
2764 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2765 {
2766     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2767     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2768     jit.xorPtr(result, result);
2769     MacroAssembler::Jump clamped = jit.jump();
2770     tooBig.link(&jit);
2771     jit.move(JITCompiler::TrustedImm32(255), result);
2772     clamped.link(&jit);
2773     inBounds.link(&jit);
2774 }
2775
2776 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2777 {
2778     // Unordered compare so we pick up NaN
2779     static const double zero = 0;
2780     static const double byteMax = 255;
2781     static const double half = 0.5;
2782     jit.loadDouble(JITCompiler::TrustedImmPtr(&zero), scratch);
2783     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2784     jit.loadDouble(JITCompiler::TrustedImmPtr(&byteMax), scratch);
2785     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2786     
2787     jit.loadDouble(JITCompiler::TrustedImmPtr(&half), scratch);
2788     // FIXME: This should probably just use a floating point round!
2789     // https://bugs.webkit.org/show_bug.cgi?id=72054
2790     jit.addDouble(source, scratch);
2791     jit.truncateDoubleToInt32(scratch, result);   
2792     MacroAssembler::Jump truncatedInt = jit.jump();
2793     
2794     tooSmall.link(&jit);
2795     jit.xorPtr(result, result);
2796     MacroAssembler::Jump zeroed = jit.jump();
2797     
2798     tooBig.link(&jit);
2799     jit.move(JITCompiler::TrustedImm32(255), result);
2800     
2801     truncatedInt.link(&jit);
2802     zeroed.link(&jit);
2803
2804 }
2805
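// Returns the jump taken when |indexGPR| is out of bounds for the typed array in |baseGPR|.
// The jump is left unset when no check is needed: PutByValAlias implies the access was already
// checked, and a compile-time-foldable view with a constant index below its length is provably
// in bounds. Otherwise the index is compared against the folded length or the view's length field.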
2806 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2807 {
2808     if (node->op() == PutByValAlias)
2809         return JITCompiler::Jump();
2810     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2811         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2812     if (view) {
2813         uint32_t length = view->length();
2814         Node* indexNode = m_jit.graph().child(node, 1).node();
2815         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2816             return JITCompiler::Jump();
2817         return m_jit.branch32(
2818             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2819     }
2820     return m_jit.branch32(
2821         MacroAssembler::AboveOrEqual, indexGPR,
2822         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2823 }
2824
2825 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2826 {
2827     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2828     if (!jump.isSet())
2829         return;
2830     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2831 }
2832
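// Completes an out-of-bounds check started by jumpForTypedArrayOutOfBounds. For in-bounds array
// modes the out-of-bounds case is simply a speculation failure. For modes that tolerate
// out-of-bounds accesses, a wasteful typed array whose vector is null (its buffer was presumably
// neutered) still triggers an OSR exit. Returns the jump that the in-bounds path takes around
// this handling; the caller links it if set.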
2833 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2834 {
2835     JITCompiler::Jump done;
2836     if (outOfBounds.isSet()) {
2837         done = m_jit.jump();
2838         if (node->arrayMode().isInBounds())
2839             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2840         else {
2841             outOfBounds.link(&m_jit);
2842
2843             JITCompiler::Jump notWasteful = m_jit.branch32(
2844                 MacroAssembler::NotEqual,
2845                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2846                 TrustedImm32(WastefulTypedArray));
2847
2848             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2849                 MacroAssembler::Zero,
2850                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2851             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2852             notWasteful.link(&m_jit);
2853         }
2854     }
2855     return done;
2856 }
2857
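// Loads the element at |propertyReg| from |storageReg|, widening to 32 bits with sign extension
// for signed element types and zero extension otherwise.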
2858 void SpeculativeJIT::loadFromIntTypedArray(GPRReg storageReg, GPRReg propertyReg, GPRReg resultReg, TypedArrayType type)
2859 {
2860     switch (elementSize(type)) {
2861     case 1:
2862         if (isSigned(type))
2863             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2864         else
2865             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2866         break;
2867     case 2:
2868         if (isSigned(type))
2869             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2870         else
2871             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2872         break;
2873     case 4:
2874         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2875         break;
2876     default:
2877         CRASH();
2878     }
2879 }
2880
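// Chooses how to represent the loaded element. Anything sub-32-bit or signed already fits in an
// int32. A Uint32 load needs care because the top bit may be set: either speculate that the value
// is non-negative (exiting on overflow), use Int52 on 64-bit, or convert to double and add 2^32
// when the int32 view of the value is negative.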
2881 void SpeculativeJIT::setIntTypedArrayLoadResult(Node* node, GPRReg resultReg, TypedArrayType type, bool canSpeculate)
2882 {
2883     if (elementSize(type) < 4 || isSigned(type)) {
2884         int32Result(resultReg, node);
2885         return;
2886     }
2887     
2888     ASSERT(elementSize(type) == 4 && !isSigned(type));
2889     if (node->shouldSpeculateInt32() && canSpeculate) {
2890         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2891         int32Result(resultReg, node);
2892         return;
2893     }
2894     
2895 #if USE(JSVALUE64)
2896     if (node->shouldSpeculateAnyInt()) {
2897         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2898         strictInt52Result(resultReg, node);
2899         return;
2900     }
2901 #endif
2902     
2903     FPRTemporary fresult(this);
2904     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2905     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2906     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2907     positive.link(&m_jit);
2908     doubleResult(fresult.fpr(), node);
2909 }
2910
2911 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2912 {
2913     ASSERT(isInt(type));
2914     
2915     SpeculateCellOperand base(this, node->child1());
2916     SpeculateStrictInt32Operand property(this, node->child2());
2917     StorageOperand storage(this, node->child3());
2918
2919     GPRReg baseReg = base.gpr();
2920     GPRReg propertyReg = property.gpr();
2921     GPRReg storageReg = storage.gpr();
2922
2923     GPRTemporary result(this);
2924     GPRReg resultReg = result.gpr();
2925
2926     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2927
2928     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2929     loadFromIntTypedArray(storageReg, propertyReg, resultReg, type);
2930     bool canSpeculate = true;
2931     setIntTypedArrayLoadResult(node, resultReg, type, canSpeculate);
2932 }
2933
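// Materializes the value to store into |value| as an int32 GPR. Number constants of the expected
// type are folded (and clamped if needed); otherwise the value is converted according to its use
// kind, with clamping for clamped arrays. Returns false, after terminating speculative execution,
// if the store can never happen because the constant is not a number.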
2934 bool SpeculativeJIT::getIntTypedArrayStoreOperand(
2935     GPRTemporary& value,
2936     GPRReg property,
2937 #if USE(JSVALUE32_64)
2938     GPRTemporary& propertyTag,
2939     GPRTemporary& valueTag,
2940 #endif
2941     Edge valueUse, JITCompiler::JumpList& slowPathCases, bool isClamped)
2942 {
2943     bool isAppropriateConstant = false;
2944     if (valueUse->isConstant()) {
2945         JSValue jsValue = valueUse->asJSValue();
2946         SpeculatedType expectedType = typeFilterFor(valueUse.useKind());
2947         SpeculatedType actualType = speculationFromValue(jsValue);
2948         isAppropriateConstant = (expectedType | actualType) == expectedType;
2949     }
2950     
2951     if (isAppropriateConstant) {
2952         JSValue jsValue = valueUse->asJSValue();
2953         if (!jsValue.isNumber()) {
2954             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2955             return false;
2956         }
2957         double d = jsValue.asNumber();
2958         if (isClamped)
2959             d = clampDoubleToByte(d);
2960         GPRTemporary scratch(this);
2961         GPRReg scratchReg = scratch.gpr();
2962         m_jit.move(Imm32(toInt32(d)), scratchReg);
2963         value.adopt(scratch);
2964     } else {
2965         switch (valueUse.useKind()) {
2966         case Int32Use: {
2967             SpeculateInt32Operand valueOp(this, valueUse);
2968             GPRTemporary scratch(this);
2969             GPRReg scratchReg = scratch.gpr();
2970             m_jit.move(valueOp.gpr(), scratchReg);
2971             if (isClamped)
2972                 compileClampIntegerToByte(m_jit, scratchReg);
2973             value.adopt(scratch);
2974             break;
2975         }
2976             
2977 #if USE(JSVALUE64)
2978         case Int52RepUse: {
2979             SpeculateStrictInt52Operand valueOp(this, valueUse);
2980             GPRTemporary scratch(this);
2981             GPRReg scratchReg = scratch.gpr();
2982             m_jit.move(valueOp.gpr(), scratchReg);
2983             if (isClamped) {
2984                 MacroAssembler::Jump inBounds = m_jit.branch64(
2985                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2986                 MacroAssembler::Jump tooBig = m_jit.branch64(
2987                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2988                 m_jit.move(TrustedImm32(0), scratchReg);
2989                 MacroAssembler::Jump clamped = m_jit.jump();
2990                 tooBig.link(&m_jit);
2991                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2992                 clamped.link(&m_jit);
2993                 inBounds.link(&m_jit);
2994             }
2995             value.adopt(scratch);
2996             break;
2997         }
2998 #endif // USE(JSVALUE64)
2999             
3000         case DoubleRepUse: {
3001             RELEASE_ASSERT(!isAtomicsIntrinsic(m_currentNode->op()));
3002             if (isClamped) {
3003                 SpeculateDoubleOperand valueOp(this, valueUse);
3004                 GPRTemporary result(this);
3005                 FPRTemporary floatScratch(this);
3006                 FPRReg fpr = valueOp.fpr();
3007                 GPRReg gpr = result.gpr();
3008                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
3009                 value.adopt(result);
3010             } else {
3011 #if USE(JSVALUE32_64)
3012                 GPRTemporary realPropertyTag(this);
3013                 propertyTag.adopt(realPropertyTag);
3014                 GPRReg propertyTagGPR = propertyTag.gpr();
3015
3016                 GPRTemporary realValueTag(this);
3017                 valueTag.adopt(realValueTag);
3018                 GPRReg valueTagGPR = valueTag.gpr();
3019 #endif
3020                 SpeculateDoubleOperand valueOp(this, valueUse);
3021                 GPRTemporary result(this);
3022                 FPRReg fpr = valueOp.fpr();
3023                 GPRReg gpr = result.gpr();
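                // For non-clamped stores: NaN stores 0, doubles whose truncation fits in int32
                // take the fast path, and anything else is boxed (along with the int32 property
                // on 64-bit) so the generic put-by-val slow path can handle it.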
3024                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
3025                 m_jit.xorPtr(gpr, gpr);
3026                 MacroAssembler::JumpList fixed(m_jit.jump());
3027                 notNaN.link(&m_jit);
3028
3029                 fixed.append(m_jit.branchTruncateDoubleToInt32(
3030                     fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
3031
3032 #if USE(JSVALUE64)
3033                 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
3034                 boxDouble(fpr, gpr);
3035 #else
3036                 UNUSED_PARAM(property);
3037                 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
3038                 boxDouble(fpr, valueTagGPR, gpr);
3039 #endif
3040                 slowPathCases.append(m_jit.jump());
3041
3042                 fixed.link(&m_jit);
3043                 value.adopt(result);
3044             }
3045             break;
3046         }
3047             
3048         default:
3049             RELEASE_ASSERT_NOT_REACHED();
3050             break;
3051         }
3052     }
3053     return true;
3054 }
3055
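// Stores into an int typed array: materialize the value, emit the (possibly elided) bounds check,
// do the raw store, and hand doubles that do not fit in int32 to the generic put-by-val operation.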
3056 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3057 {
3058     ASSERT(isInt(type));
3059     
3060     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3061     GPRReg storageReg = storage.gpr();
3062     
3063     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3064     
3065     GPRTemporary value;
3066 #if USE(JSVALUE32_64)
3067     GPRTemporary propertyTag;
3068     GPRTemporary valueTag;
3069 #endif
3070
3071     JITCompiler::JumpList slowPathCases;
3072     
3073     bool result = getIntTypedArrayStoreOperand(
3074         value, property,
3075 #if USE(JSVALUE32_64)
3076         propertyTag, valueTag,
3077 #endif
3078         valueUse, slowPathCases, isClamped(type));
3079     if (!result) {
3080         noResult(node);
3081         return;
3082     }
3083
3084     GPRReg valueGPR = value.gpr();
3085 #if USE(JSVALUE32_64)
3086     GPRReg propertyTagGPR = propertyTag.gpr();
3087     GPRReg valueTagGPR = valueTag.gpr();
3088 #endif
3089
3090     ASSERT_UNUSED(valueGPR, valueGPR != property);
3091     ASSERT(valueGPR != base);
3092     ASSERT(valueGPR != storageReg);
3093     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3094
3095     switch (elementSize(type)) {
3096     case 1:
3097         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
3098         break;
3099     case 2:
3100         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
3101         break;
3102     case 4:
3103         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3104         break;
3105     default:
3106         CRASH();
3107     }
3108
3109     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3110     if (done.isSet())
3111         done.link(&m_jit);
3112
3113     if (!slowPathCases.empty()) {
3114 #if USE(JSVALUE64)
3115         if (node->op() == PutByValDirect) {
3116             addSlowPathGenerator(slowPathCall(
3117                 slowPathCases, this,
3118                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
3119                 NoResult, base, property, valueGPR));
3120         } else {
3121             addSlowPathGenerator(slowPathCall(
3122                 slowPathCases, this,
3123                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
3124                 NoResult, base, property, valueGPR));
3125         }
3126 #else // not USE(JSVALUE64)
3127         if (node->op() == PutByValDirect) {
3128             addSlowPathGenerator(slowPathCall(
3129                 slowPathCases, this,
3130                 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
3131                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3132         } else {
3133             addSlowPathGenerator(slowPathCall(
3134                 slowPathCases, this,
3135                 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
3136                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3137         }
3138 #endif
3139     }
3140     
3141     noResult(node);
3142 }
3143
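// Loads a Float32 or Float64 element; Float32 values are widened to double for the result.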
3144 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3145 {
3146     ASSERT(isFloat(type));
3147     
3148     SpeculateCellOperand base(this, node->child1());
3149     SpeculateStrictInt32Operand property(this, node->child2());
3150     StorageOperand storage(this, node->child3());
3151
3152     GPRReg baseReg = base.gpr();
3153     GPRReg propertyReg = property.gpr();
3154     GPRReg storageReg = storage.gpr();
3155
3156     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
3157
3158     FPRTemporary result(this);
3159     FPRReg resultReg = result.fpr();
3160     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3161     switch (elementSize(type)) {
3162     case 4:
3163         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3164         m_jit.convertFloatToDouble(resultReg, resultReg);
3165         break;
3166     case 8: {
3167         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3168         break;
3169     }
3170     default:
3171         RELEASE_ASSERT_NOT_REACHED();
3172     }
3173     
3174     doubleResult(resultReg, node);
3175 }
3176
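// Stores into a float typed array, narrowing the incoming double to float for 4-byte elements.
// Out-of-bounds indices are handled the same way as for int typed array stores.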
3177 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3178 {
3179     ASSERT(isFloat(type));
3180     
3181     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3182     GPRReg storageReg = storage.gpr();
3183     
3184     Edge baseUse = m_jit.graph().varArgChild(node, 0);
3185     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3186
3187     SpeculateDoubleOperand valueOp(this, valueUse);
3188     FPRTemporary scratch(this);
3189     FPRReg valueFPR = valueOp.fpr();
3190     FPRReg scratchFPR = scratch.fpr();
3191
3192     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3193     
3194     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3195     
3196     switch (elementSize(type)) {
3197     case 4: {
3198         m_jit.moveDouble(valueFPR, scratchFPR);
3199         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3200         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3201         break;
3202     }
3203     case 8:
3204         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3205         break;
3206     default:
3207         RELEASE_ASSERT_NOT_REACHED();
3208     }
3209
3210     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3211     if (done.isSet())
3212         done.link(&m_jit);
3213     noResult(node);
3214 }
3215
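// object[string] (and the symbol variant below) is compiled as a call to a C++ operation after
// speculating on the operand types; there is no inline fast path here.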
3216 void SpeculativeJIT::compileGetByValForObjectWithString(Node* node)
3217 {
3218     SpeculateCellOperand arg1(this, node->child1());
3219     SpeculateCellOperand arg2(this, node->child2());
3220
3221     GPRReg arg1GPR = arg1.gpr();
3222     GPRReg arg2GPR = arg2.gpr();
3223
3224     speculateObject(node->child1(), arg1GPR);
3225     speculateString(node->child2(), arg2GPR);
3226
3227     GPRFlushedCallResult resultPayload(this);
3228     GPRReg resultPayloadGPR = resultPayload.gpr();
3229 #if USE(JSVALUE64)
3230     JSValueRegs resultRegs(resultPayloadGPR);
3231 #else
3232     GPRFlushedCallResult2 resultTag(this);
3233     GPRReg resultTagGPR = resultTag.gpr();
3234     JSValueRegs resultRegs(resultTagGPR, resultPayloadGPR);
3235 #endif
3236
3237     flushRegisters();
3238     callOperation(operationGetByValObjectString, extractResult(resultRegs), arg1GPR, arg2GPR);
3239     m_jit.exceptionCheck();
3240
3241     jsValueResult(resultRegs, node);
3242 }
3243
3244 void SpeculativeJIT::compileGetByValForObjectWithSymbol(Node* node)
3245 {
3246     SpeculateCellOperand arg1(this, node->child1());
3247     SpeculateCellOperand arg2(this, node->child2());
3248
3249     GPRReg arg1GPR = arg1.gpr();
3250     GPRReg arg2GPR = arg2.gpr();
3251
3252     speculateObject(node->child1(), arg1GPR);
3253     speculateSymbol(node->child2(), arg2GPR);
3254
3255     GPRFlushedCallResult resultPayload(this);
3256     GPRReg resultPayloadGPR = resultPayload.gpr();
3257 #if USE(JSVALUE64)
3258     JSValueRegs resultRegs(resultPayloadGPR);
3259 #else
3260     GPRFlushedCallResult2 resultTag(this);
3261     GPRReg resultTagGPR = resultTag.gpr();
3262     JSValueRegs resultRegs(resultTagGPR, resultPayloadGPR);
3263 #endif
3264
3265     flushRegisters();
3266     callOperation(operationGetByValObjectSymbol, extractResult(resultRegs), arg1GPR, arg2GPR);
3267     m_jit.exceptionCheck();
3268
3269     jsValueResult(resultRegs, node);
3270 }
3271
3272 void SpeculativeJIT::compilePutByValForCellWithString(Node* node, Edge& child1, Edge& child2, Edge& child3)
3273 {
3274     SpeculateCellOperand arg1(this, child1);
3275     SpeculateCellOperand arg2(this, child2);
3276     JSValueOperand arg3(this, child3);
3277
3278     GPRReg arg1GPR = arg1.gpr();
3279     GPRReg arg2GPR = arg2.gpr();
3280     JSValueRegs arg3Regs = arg3.jsValueRegs();
3281
3282     speculateString(child2, arg2GPR);
3283
3284     flushRegisters();
3285     callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValCellStringStrict : operationPutByValCellStringNonStrict, arg1GPR, arg2GPR, arg3Regs);
3286     m_jit.exceptionCheck();
3287
3288     noResult(node);
3289 }
3290
3291 void SpeculativeJIT::compilePutByValForCellWithSymbol(Node* node, Edge& child1, Edge& child2, Edge& child3)
3292 {
3293     SpeculateCellOperand arg1(this, child1);
3294     SpeculateCellOperand arg2(this, child2);
3295     JSValueOperand arg3(this, child3);
3296
3297     GPRReg arg1GPR = arg1.gpr();
3298     GPRReg arg2GPR = arg2.gpr();
3299     JSValueRegs arg3Regs = arg3.jsValueRegs();
3300
3301     speculateSymbol(child2, arg2GPR);
3302
3303     flushRegisters();
3304     callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValCellSymbolStrict : operationPutByValCellSymbolNonStrict, arg1GPR, arg2GPR, arg3Regs);
3305     m_jit.exceptionCheck();
3306
3307     noResult(node);
3308 }
3309
3310 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg, GPRReg scratch3Reg)
3311 {
3312     // Check that prototype is an object.
3313     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3314     
3315     // Initialize scratchReg with the value being checked.
3316     m_jit.move(valueReg, scratchReg);
3317     
3318     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3319     MacroAssembler::Label loop(&m_jit);
3320     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3321         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3322     m_jit.emitLoadStructure(*m_jit.vm(), scratchReg, scratch3Reg, scratch2Reg);
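    // Load the prototype: if the Structure stores a non-empty prototype we have a mono-proto
    // object and use it directly; otherwise the prototype lives in the object itself at the
    // known poly-proto inline offset.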
3323 #if USE(JSVALUE64)
3324     m_jit.load64(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset()), scratch3Reg);
3325     auto hasMonoProto = m_jit.branchTest64(JITCompiler::NonZero, scratch3Reg);
3326     m_jit.load64(JITCompiler::Address(scratchReg, offsetRelativeToBase(knownPolyProtoOffset)), scratch3Reg);
3327     hasMonoProto.link(&m_jit);
3328     m_jit.move(scratch3Reg, scratchReg);
3329 #else
3330     m_jit.load32(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset() + TagOffset), scratch2Reg);
3331     m_jit.load32(MacroAssembler::Address(scratch3Reg, Structure::prototypeOffset() + PayloadOffset), scratch3Reg);
3332     auto hasMonoProto = m_jit.branch32(CCallHelpers::NotEqual, scratch2Reg, TrustedImm32(JSValue::EmptyValueTag));
3333     m_jit.load32(JITCompiler::Address(scratchReg, offsetRelativeToBase(knownPolyProtoOffset) + PayloadOffset), scratch3Reg);
3334     hasMonoProto.link(&m_jit);
3335     m_jit.move(scratch3Reg, scratchReg);
3336 #endif
3337
3338     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3339 #if USE(JSVALUE64)
3340     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3341 #else
3342     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3343 #endif
3344     
3345     // No match - result is false.
3346 #if USE(JSVALUE64)
3347     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(