AssemblyHelpers should not have a VM field
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
/*
 * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "BinarySwitch.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGArrayifySlowPathGenerator.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
#include "DFGCapabilities.h"
#include "DFGDOMJITPatchpointParams.h"
#include "DFGMayExit.h"
#include "DFGOSRExitFuzz.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "DOMJITPatchpoint.h"
#include "DirectArguments.h"
#include "JITAddGenerator.h"
#include "JITBitAndGenerator.h"
#include "JITBitOrGenerator.h"
#include "JITBitXorGenerator.h"
#include "JITDivGenerator.h"
#include "JITLeftShiftGenerator.h"
#include "JITMulGenerator.h"
#include "JITRightShiftGenerator.h"
#include "JITSubGenerator.h"
#include "JSAsyncFunction.h"
#include "JSCInlines.h"
#include "JSEnvironmentRecord.h"
#include "JSFixedArray.h"
#include "JSGeneratorFunction.h"
#include "JSLexicalEnvironment.h"
#include "LinkBuffer.h"
#include "RegExpConstructor.h"
#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include <wtf/BitVector.h>
#include <wtf/Box.h>
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
{
}

SpeculativeJIT::~SpeculativeJIT()
{
}

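// Inline fast path for allocating a JSFinalObject together with its butterfly
// (out-of-line property storage plus an indexing header and vector when the
// structure has indexed properties). If either allocator is unavailable, or an
// inline allocation fails, control falls through to operationNewRawObject via
// the CallArrayAllocatorSlowPathGenerator registered below.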
void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
{
    IndexingType indexingType = structure->indexingType();
    bool hasIndexingHeader = hasIndexedProperties(indexingType);

    unsigned inlineCapacity = structure->inlineCapacity();
    unsigned outOfLineCapacity = structure->outOfLineCapacity();

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    ASSERT(vectorLength >= numElements);
    vectorLength = Butterfly::optimalContiguousVectorLength(structure.get(), vectorLength);

    JITCompiler::JumpList slowCases;

    size_t size = 0;
    if (hasIndexingHeader)
        size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
    size += outOfLineCapacity * sizeof(JSValue);

    m_jit.move(TrustedImmPtr(0), storageGPR);

    if (size) {
        if (MarkedAllocator* allocator = m_jit.vm()->auxiliarySpace.allocatorFor(size)) {
            m_jit.move(TrustedImmPtr(allocator), scratchGPR);
            m_jit.emitAllocate(storageGPR, allocator, scratchGPR, scratch2GPR, slowCases);

            m_jit.addPtr(
                TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
                storageGPR);

            if (hasIndexingHeader)
                m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
        } else
            slowCases.append(m_jit.jump());
    }

    size_t allocationSize = JSFinalObject::allocationSize(inlineCapacity);
    MarkedAllocator* allocatorPtr = subspaceFor<JSFinalObject>(*m_jit.vm())->allocatorFor(allocationSize);
    if (allocatorPtr) {
        m_jit.move(TrustedImmPtr(allocatorPtr), scratchGPR);
        emitAllocateJSObject(resultGPR, allocatorPtr, scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
        m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
    } else
        slowCases.append(m_jit.jump());

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(std::make_unique<CallArrayAllocatorSlowPathGenerator>(
        slowCases, this, operationNewRawObject, resultGPR, storageGPR,
        structure, vectorLength));

    if (numElements < vectorLength) {
#if USE(JSVALUE64)
        if (hasDouble(structure->indexingType()))
            m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        else
            m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        if (hasDouble(structure->indexingType()))
            value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        else
            value.asInt64 = JSValue::encode(JSValue());
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    if (hasIndexingHeader)
        m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));

    m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());

    m_jit.mutatorFence(*m_jit.vm());
}

void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
{
    if (inlineCallFrame && !inlineCallFrame->isVarargs())
        m_jit.move(TrustedImm32(inlineCallFrame->arguments.size() - !includeThis), lengthGPR);
    else {
        VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
        m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
        if (!includeThis)
            m_jit.sub32(TrustedImm32(1), lengthGPR);
    }
}

void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
{
    emitGetLength(origin.inlineCallFrame, lengthGPR, includeThis);
}

void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
{
    if (origin.inlineCallFrame) {
        if (origin.inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                JITCompiler::addressFor(origin.inlineCallFrame->calleeRecovery.virtualRegister()),
                calleeGPR);
        } else {
            m_jit.move(
                TrustedImmPtr::weakPointer(m_jit.graph(), origin.inlineCallFrame->calleeRecovery.constant().asCell()),
                calleeGPR);
        }
    } else
        m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
}

void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
{
    m_jit.addPtr(
        TrustedImm32(
            JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
        GPRInfo::callFrameRegister, startGPR);
}

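// When OSR exit fuzzing is enabled, every speculation check first bumps a
// global counter and returns a jump that fires at (or after) the configured
// threshold (fireOSRExitFuzzAt / fireOSRExitFuzzAtOrAfter), forcing an exit
// even though the guarded check itself would have passed.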
MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
{
    if (!Options::useOSRExitFuzz()
        || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
        || !doOSRExitFuzzing())
        return MacroAssembler::Jump();

    MacroAssembler::Jump result;

    m_jit.pushToSave(GPRInfo::regT0);
    m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
    m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
    m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
    unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
    unsigned at = Options::fireOSRExitFuzzAt();
    if (at || atOrAfter) {
        unsigned threshold;
        MacroAssembler::RelationalCondition condition;
        if (atOrAfter) {
            threshold = atOrAfter;
            condition = MacroAssembler::Below;
        } else {
            threshold = at;
            condition = MacroAssembler::NotEqual;
        }
        MacroAssembler::Jump ok = m_jit.branch32(
            condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
        m_jit.popToRestore(GPRInfo::regT0);
        result = m_jit.jump();
        ok.link(&m_jit);
    }
    m_jit.popToRestore(GPRInfo::regT0);

    return result;
}

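// Each speculationCheck() records an OSR exit: the jump(s) that trigger it, the
// value source to recover, and the current variable event stream position, so
// the exit machinery can reconstruct baseline state at this point.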
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList jumpsToFail;
        jumpsToFail.append(fuzzJump);
        jumpsToFail.append(jumpToFail);
        m_jit.appendExitInfo(jumpsToFail);
    } else
        m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList myJumpsToFail;
        myJumpsToFail.append(jumpsToFail);
        myJumpsToFail.append(fuzzJump);
        m_jit.appendExitInfo(myJumpsToFail);
    } else
        m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    return speculationCheck(kind, jsValueSource, nodeUse.node());
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}

void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}

void SpeculativeJIT::unreachable(Node* node)
{
    m_compileOkay = false;
    m_jit.abortWithReason(DFGUnreachableNode, node->op());
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}

void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(exitKind, source, edge.node(), jumpToFail);
}

RegisterSet SpeculativeJIT::usedRegisters()
{
    RegisterSet result;

    for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
        GPRReg gpr = GPRInfo::toRegister(i);
        if (m_gprs.isInUse(gpr))
            result.set(gpr);
    }
    for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
        FPRReg fpr = FPRInfo::toRegister(i);
        if (m_fprs.isInUse(fpr))
            result.set(fpr);
    }

    result.merge(RegisterSet::stubUnavailableRegisters());

    return result;
}

void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(WTFMove(slowPathGenerator));
}

void SpeculativeJIT::addSlowPathGenerator(std::function<void()> lambda)
{
    m_slowPathLambdas.append(SlowPathLambda{ lambda, m_currentNode, static_cast<unsigned>(m_stream->size()) });
}

void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
{
    for (auto& slowPathGenerator : m_slowPathGenerators) {
        pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
        slowPathGenerator->generate(this);
    }
    for (auto& slowPathLambda : m_slowPathLambdas) {
        Node* currentNode = slowPathLambda.currentNode;
        m_currentNode = currentNode;
        m_outOfLineStreamIndex = slowPathLambda.streamIndex;
        pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
        slowPathLambda.generator();
        m_outOfLineStreamIndex = std::nullopt;
    }
}

void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned i = 0; i < m_generationInfo.size(); ++i)
        m_generationInfo[i] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}

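// Silent spill/fill plans describe how to save a live register around a call
// and how to restore it afterwards (reload from the stack or rematerialize a
// constant) without disturbing the register allocation state.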
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }

    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }

#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}

void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        ASSERT(plan.node()->constant()->value().isCell());
        m_jit.move(TrustedImmPtr(plan.node()->constant()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.move(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}

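// tempGPR holds the cell's indexing type byte; the returned jump is taken when
// the observed shape (and, where required, the IsArray bit) does not match what
// the ArrayMode expects.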
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        CRASH();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
#endif
    }

    case Array::Array:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));

    case Array::NonArray:
    case Array::OriginalNonArray:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));

    case Array::PossiblyArray:
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }

    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}

JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;

    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::Undecided:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }

    return result;
}

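// CheckArray: speculate that the base cell's indexing type (or cell type, for
// typed arrays and arguments objects) matches the ArrayMode, exiting via
// BadIndexingType / BadType if it does not. No conversion is performed here;
// that is Arrayify's job.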
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }

    const ClassInfo* expectedClassInfo = 0;

    switch (node->arrayMode().type()) {
    case Array::AnyTypedArray:
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::Undecided:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));

        noResult(m_currentNode);
        return;
    }
    case Array::DirectArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
        noResult(m_currentNode);
        return;
    case Array::ScopedArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
        noResult(m_currentNode);
        return;
    default:
        speculateCellTypeWithoutTypeFiltering(
            node->child1(), baseReg,
            typeForTypedArrayType(node->arrayMode().typedArrayType()));
        noResult(m_currentNode);
        return;
    }

    RELEASE_ASSERT(expectedClassInfo);

    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    m_jit.emitLoadStructure(*m_jit.vm(), baseReg, temp.gpr(), temp2.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            TrustedImmPtr(expectedClassInfo)));

    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;

    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);

        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }

    addSlowPathGenerator(std::make_unique<ArrayifySlowPathGenerator>(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));

    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());

    SpeculateCellOperand base(this, node->child1());

    if (!node->child2()) {
        arrayify(node, base.gpr(), InvalidGPRReg);
        return;
    }

    SpeculateInt32Operand property(this, node->child2());

    arrayify(node, base.gpr(), property.gpr());
}

GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }

        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }

    case DataFormatStorage: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    default:
        return fillSpeculateCell(edge);
    }
}

void SpeculativeJIT::useChildren(Node* node)
{
    if (node->flags() & NodeHasVarArgs) {
        for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
            if (!!m_jit.graph().m_varArgChildren[childIdx])
                use(m_jit.graph().m_varArgChildren[childIdx]);
        }
    } else {
        Edge child1 = node->child1();
        if (!child1) {
            ASSERT(!node->child2() && !node->child3());
            return;
        }
        use(child1);

        Edge child2 = node->child2();
        if (!child2) {
            ASSERT(!node->child3());
            return;
        }
        use(child2);

        Edge child3 = node->child3();
        if (!child3)
            return;
        use(child3);
    }
}

void SpeculativeJIT::compileTryGetById(Node* node)
{
    switch (node->child1().useKind()) {
    case CellUse: {
        SpeculateCellOperand base(this, node->child1());
        JSValueRegsTemporary result(this, Reuse, base);

        JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
        JSValueRegs resultRegs = result.regs();

        base.use();

        cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);

        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        break;
    }

    case UntypedUse: {
        JSValueOperand base(this, node->child1());
        JSValueRegsTemporary result(this, Reuse, base);

        JSValueRegs baseRegs = base.jsValueRegs();
        JSValueRegs resultRegs = result.regs();

        base.use();

        JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);

        cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);

        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        break;
    }

    default:
        DFG_CRASH(m_jit.graph(), node, "Bad use kind");
        break;
    }
}

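// 'in' with a constant atomic string key compiles to a patchable inline cache
// (structure stub) with operationInOptimize as its slow path; any other key
// falls back to a call to operationGenericIn.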
void SpeculativeJIT::compileIn(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    GPRReg baseGPR = base.gpr();

    if (JSString* string = node->child2()->dynamicCastConstant<JSString*>(*m_jit.vm())) {
        if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
            StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);

            GPRTemporary result(this);
            GPRReg resultGPR = result.gpr();

            use(node->child2());

            MacroAssembler::PatchableJump jump = m_jit.patchableJump();
            MacroAssembler::Label done = m_jit.label();

            // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
            // we can cast it to const AtomicStringImpl* safely.
            auto slowPath = slowPathCall(
                jump.m_jump, this, operationInOptimize,
                JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
                static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));

            stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
            stubInfo->codeOrigin = node->origin.semantic;
            stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
            stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
            stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
#if USE(JSVALUE32_64)
            stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
            stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
            stubInfo->patch.thisTagGPR = static_cast<int8_t>(InvalidGPRReg);
#endif
            stubInfo->patch.usedRegisters = usedRegisters();

            m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
            addSlowPathGenerator(WTFMove(slowPath));

            base.use();

            blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
            return;
        }
    }

    JSValueOperand key(this, node->child2());
    JSValueRegs regs = key.jsValueRegs();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(
        operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
        baseGPR, regs);
    m_jit.exceptionCheck();
    blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

void SpeculativeJIT::compileDeleteById(Node* node)
{
    JSValueOperand value(this, node->child1());
    GPRFlushedCallResult result(this);

    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg resultGPR = result.gpr();

    value.use();

    flushRegisters();
    callOperation(operationDeleteById, resultGPR, valueRegs, identifierUID(node->identifierNumber()));
    m_jit.exceptionCheck();

    unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

void SpeculativeJIT::compileDeleteByVal(Node* node)
{
    JSValueOperand base(this, node->child1());
    JSValueOperand key(this, node->child2());
    GPRFlushedCallResult result(this);

    JSValueRegs baseRegs = base.jsValueRegs();
    JSValueRegs keyRegs = key.jsValueRegs();
    GPRReg resultGPR = result.gpr();

    base.use();
    key.use();

    flushRegisters();
    callOperation(operationDeleteByVal, resultGPR, baseRegs, keyRegs);
    m_jit.exceptionCheck();

    unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}

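// If the next node in the block is a Branch that consumes this compare
// (detectPeepHoleBranch), fuse the compare and the branch into a single
// compare-and-branch sequence and skip ahead to the branch node.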
bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);

    return false;
}

bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        ASSERT(node->adjustedRefCount() == 1);

        nonSpeculativePeepholeStrictEq(node, branchNode, invert);

        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;

        return true;
    }

    nonSpeculativeNonPeepholeStrictEq(node, invert);

    return false;
}

static const char* dataFormatString(DataFormat format)
{
    // These values correspond to the DataFormat enum.
    const char* strings[] = {
        "[  ]",
        "[ i]",
        "[ d]",
        "[ c]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
        "[J ]",
        "[Ji]",
        "[Jd]",
        "[Jc]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
    };
    return strings[format];
}

void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}

GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate();
}

GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->allocate(specific);
}

#if USE(JSVALUE32_64)
GPRTemporary::GPRTemporary(
    SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr(which));
    else
        m_gpr = m_jit->allocate();
}
#endif // USE(JSVALUE32_64)

JSValueRegsTemporary::JSValueRegsTemporary() { }

JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}

#if USE(JSVALUE64)
template<typename T>
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
    : m_gpr(jit, Reuse, operand)
{
}
#else
template<typename T>
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
{
    if (resultWord == PayloadWord) {
        m_payloadGPR = GPRTemporary(jit, Reuse, operand);
        m_tagGPR = GPRTemporary(jit);
    } else {
        m_payloadGPR = GPRTemporary(jit);
        m_tagGPR = GPRTemporary(jit, Reuse, operand);
    }
}
#endif

#if USE(JSVALUE64)
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
{
    m_gpr = GPRTemporary(jit, Reuse, operand);
}
#else
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
{
    if (jit->canReuse(operand.node())) {
        m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
        m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
    } else {
        m_payloadGPR = GPRTemporary(jit);
        m_tagGPR = GPRTemporary(jit);
    }
}
#endif

1319 JSValueRegsTemporary::~JSValueRegsTemporary() { }
1320
1321 JSValueRegs JSValueRegsTemporary::regs()
1322 {
1323 #if USE(JSVALUE64)
1324     return JSValueRegs(m_gpr.gpr());
1325 #else
1326     return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
1327 #endif
1328 }
1329
1330 void GPRTemporary::adopt(GPRTemporary& other)
1331 {
1332     ASSERT(!m_jit);
1333     ASSERT(m_gpr == InvalidGPRReg);
1334     ASSERT(other.m_jit);
1335     ASSERT(other.m_gpr != InvalidGPRReg);
1336     m_jit = other.m_jit;
1337     m_gpr = other.m_gpr;
1338     other.m_jit = 0;
1339     other.m_gpr = InvalidGPRReg;
1340 }
1341
1342 FPRTemporary::FPRTemporary(FPRTemporary&& other)
1343 {
1344     ASSERT(other.m_jit);
1345     ASSERT(other.m_fpr != InvalidFPRReg);
1346     m_jit = other.m_jit;
1347     m_fpr = other.m_fpr;
1348
1349     other.m_jit = nullptr;
1350 }
1351
1352 FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1353     : m_jit(jit)
1354     , m_fpr(InvalidFPRReg)
1355 {
1356     m_fpr = m_jit->fprAllocate();
1357 }
1358
1359 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1360     : m_jit(jit)
1361     , m_fpr(InvalidFPRReg)
1362 {
1363     if (m_jit->canReuse(op1.node()))
1364         m_fpr = m_jit->reuse(op1.fpr());
1365     else
1366         m_fpr = m_jit->fprAllocate();
1367 }
1368
1369 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
1370     : m_jit(jit)
1371     , m_fpr(InvalidFPRReg)
1372 {
1373     if (m_jit->canReuse(op1.node()))
1374         m_fpr = m_jit->reuse(op1.fpr());
1375     else if (m_jit->canReuse(op2.node()))
1376         m_fpr = m_jit->reuse(op2.fpr());
1377     else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
1378         m_fpr = m_jit->reuse(op1.fpr());
1379     else
1380         m_fpr = m_jit->fprAllocate();
1381 }
1382
1383 #if USE(JSVALUE32_64)
1384 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
1385     : m_jit(jit)
1386     , m_fpr(InvalidFPRReg)
1387 {
1388     if (op1.isDouble() && m_jit->canReuse(op1.node()))
1389         m_fpr = m_jit->reuse(op1.fpr());
1390     else
1391         m_fpr = m_jit->fprAllocate();
1392 }
1393 #endif
1394
1395 void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
1396 {
1397     BasicBlock* taken = branchNode->branchData()->taken.block;
1398     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1399
1400     if (taken == nextBlock()) {
1401         condition = MacroAssembler::invert(condition);
1402         std::swap(taken, notTaken);
1403     }
1404
1405     SpeculateDoubleOperand op1(this, node->child1());
1406     SpeculateDoubleOperand op2(this, node->child2());
1407     
1408     branchDouble(condition, op1.fpr(), op2.fpr(), taken);
1409     jump(notTaken);
1410 }
1411
1412 void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1413 {
1414     BasicBlock* taken = branchNode->branchData()->taken.block;
1415     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1416
1417     MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1418     
1419     if (taken == nextBlock()) {
1420         condition = MacroAssembler::NotEqual;
1421         BasicBlock* tmp = taken;
1422         taken = notTaken;
1423         notTaken = tmp;
1424     }
1425
1426     SpeculateCellOperand op1(this, node->child1());
1427     SpeculateCellOperand op2(this, node->child2());
1428     
1429     GPRReg op1GPR = op1.gpr();
1430     GPRReg op2GPR = op2.gpr();
1431     
1432     if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1433         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1434             speculationCheck(
1435                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1436         }
1437         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1438             speculationCheck(
1439                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1440         }
1441     } else {
1442         if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1443             speculationCheck(
1444                 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1445                 m_jit.branchIfNotObject(op1GPR));
1446         }
1447         speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1448             m_jit.branchTest8(
1449                 MacroAssembler::NonZero, 
1450                 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()), 
1451                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1452
1453         if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1454             speculationCheck(
1455                 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1456                 m_jit.branchIfNotObject(op2GPR));
1457         }
1458         speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1459             m_jit.branchTest8(
1460                 MacroAssembler::NonZero, 
1461                 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()), 
1462                 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1463     }
1464
1465     branchPtr(condition, op1GPR, op2GPR, taken);
1466     jump(notTaken);
1467 }
1468
1469 void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1470 {
1471     BasicBlock* taken = branchNode->branchData()->taken.block;
1472     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1473
1474     // The branch instruction will branch to the taken block.
1475     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1476     if (taken == nextBlock()) {
1477         condition = JITCompiler::invert(condition);
1478         BasicBlock* tmp = taken;
1479         taken = notTaken;
1480         notTaken = tmp;
1481     }
1482
1483     if (node->child1()->isInt32Constant()) {
1484         int32_t imm = node->child1()->asInt32();
1485         SpeculateBooleanOperand op2(this, node->child2());
1486         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1487     } else if (node->child2()->isInt32Constant()) {
1488         SpeculateBooleanOperand op1(this, node->child1());
1489         int32_t imm = node->child2()->asInt32();
1490         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1491     } else {
1492         SpeculateBooleanOperand op1(this, node->child1());
1493         SpeculateBooleanOperand op2(this, node->child2());
1494         branch32(condition, op1.gpr(), op2.gpr(), taken);
1495     }
1496
1497     jump(notTaken);
1498 }
1499
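     // The fast path below handles 8-bit strings that contain only non-uppercase ASCII characters,
     // in which case the argument is returned unchanged. It falls back to operationToLowerCase
     // (passing the index reached so far) as soon as it sees a rope (no StringImpl), a 16-bit
     // string, a non-ASCII character, or an uppercase ASCII letter.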
1500 void SpeculativeJIT::compileToLowerCase(Node* node)
1501 {
1502     ASSERT(node->op() == ToLowerCase);
1503     SpeculateCellOperand string(this, node->child1());
1504     GPRTemporary temp(this);
1505     GPRTemporary index(this);
1506     GPRTemporary charReg(this);
1507     GPRTemporary length(this);
1508
1509     GPRReg stringGPR = string.gpr();
1510     GPRReg tempGPR = temp.gpr();
1511     GPRReg indexGPR = index.gpr();
1512     GPRReg charGPR = charReg.gpr();
1513     GPRReg lengthGPR = length.gpr();
1514
1515     speculateString(node->child1(), stringGPR);
1516
1517     CCallHelpers::JumpList slowPath;
1518
1519     m_jit.move(TrustedImmPtr(0), indexGPR);
1520
1521     m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
1522     slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, tempGPR));
1523
1524     slowPath.append(m_jit.branchTest32(
1525         MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
1526         MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
1527     m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
1528     m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);
1529
1530     auto loopStart = m_jit.label();
1531     auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
1532     m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
1533     slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
1534     m_jit.sub32(TrustedImm32('A'), charGPR);
1535     slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));
1536
1537     m_jit.add32(TrustedImm32(1), indexGPR);
1538     m_jit.jump().linkTo(loopStart, &m_jit);
1539     
1540     slowPath.link(&m_jit);
1541     silentSpillAllRegisters(lengthGPR);
1542     callOperation(operationToLowerCase, lengthGPR, stringGPR, indexGPR);
1543     silentFillAllRegisters(lengthGPR);
1544     m_jit.exceptionCheck();
1545     auto done = m_jit.jump();
1546
1547     loopDone.link(&m_jit);
1548     m_jit.move(stringGPR, lengthGPR);
1549
1550     done.link(&m_jit);
1551     cellResult(lengthGPR, node);
1552 }
1553
1554 void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1555 {
1556     BasicBlock* taken = branchNode->branchData()->taken.block;
1557     BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1558
1559     // The branch instruction will branch to the taken block.
1560     // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1561     if (taken == nextBlock()) {
1562         condition = JITCompiler::invert(condition);
1563         BasicBlock* tmp = taken;
1564         taken = notTaken;
1565         notTaken = tmp;
1566     }
1567
1568     if (node->child1()->isInt32Constant()) {
1569         int32_t imm = node->child1()->asInt32();
1570         SpeculateInt32Operand op2(this, node->child2());
1571         branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1572     } else if (node->child2()->isInt32Constant()) {
1573         SpeculateInt32Operand op1(this, node->child1());
1574         int32_t imm = node->child2()->asInt32();
1575         branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1576     } else {
1577         SpeculateInt32Operand op1(this, node->child1());
1578         SpeculateInt32Operand op2(this, node->child2());
1579         branch32(condition, op1.gpr(), op2.gpr(), taken);
1580     }
1581
1582     jump(notTaken);
1583 }
1584
1585 // Returns true if the compare is fused with a subsequent branch.
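     // Fusion is possible when the branch is the very next node in the block and consumes this
     // compare; in that case the compare and the branch are emitted as one operation and
     // compilation skips ahead to the branch node. Otherwise this returns false and the caller
     // emits an ordinary compare that materializes a boolean result.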
1586 bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_EJJ operation)
1587 {
1588     // Fused compare & branch.
1589     unsigned branchIndexInBlock = detectPeepHoleBranch();
1590     if (branchIndexInBlock != UINT_MAX) {
1591         Node* branchNode = m_block->at(branchIndexInBlock);
1592
1593         // detectPeepHoleBranch currently only permits the branch to be the very next node,
1594         // so there can be no intervening nodes that also reference the compare.
1595         ASSERT(node->adjustedRefCount() == 1);
1596
1597         if (node->isBinaryUseKind(Int32Use))
1598             compilePeepHoleInt32Branch(node, branchNode, condition);
1599 #if USE(JSVALUE64)
1600         else if (node->isBinaryUseKind(Int52RepUse))
1601             compilePeepHoleInt52Branch(node, branchNode, condition);
1602 #endif // USE(JSVALUE64)
1603         else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
1604             // Use non-peephole comparison, for now.
1605             return false;
1606         } else if (node->isBinaryUseKind(DoubleRepUse))
1607             compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
1608         else if (node->op() == CompareEq) {
1609             if (node->isBinaryUseKind(BooleanUse))
1610                 compilePeepHoleBooleanBranch(node, branchNode, condition);
1611             else if (node->isBinaryUseKind(SymbolUse))
1612                 compilePeepHoleSymbolEquality(node, branchNode);
1613             else if (node->isBinaryUseKind(ObjectUse))
1614                 compilePeepHoleObjectEquality(node, branchNode);
1615             else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
1616                 compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
1617             else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
1618                 compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
1619             else if (!needsTypeCheck(node->child1(), SpecOther))
1620                 nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
1621             else if (!needsTypeCheck(node->child2(), SpecOther))
1622                 nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
1623             else {
1624                 nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1625                 return true;
1626             }
1627         } else {
1628             nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
1629             return true;
1630         }
1631
1632         use(node->child1());
1633         use(node->child2());
1634         m_indexInBlock = branchIndexInBlock;
1635         m_currentNode = branchNode;
1636         return true;
1637     }
1638     return false;
1639 }
1640
1641 void SpeculativeJIT::noticeOSRBirth(Node* node)
1642 {
1643     if (!node->hasVirtualRegister())
1644         return;
1645     
1646     VirtualRegister virtualRegister = node->virtualRegister();
1647     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1648     
1649     info.noticeOSRBirth(*m_stream, node, virtualRegister);
1650 }
1651
1652 void SpeculativeJIT::compileMovHint(Node* node)
1653 {
1654     ASSERT(node->containsMovHint() && node->op() != ZombieHint);
1655     
1656     Node* child = node->child1().node();
1657     noticeOSRBirth(child);
1658     
1659     m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedLocal()));
1660 }
1661
1662 void SpeculativeJIT::bail(AbortReason reason)
1663 {
1664     if (verboseCompilationEnabled())
1665         dataLog("Bailing compilation.\n");
1666     m_compileOkay = true;
1667     m_jit.abortWithReason(reason, m_lastGeneratedNode);
1668     clearGenerationInfo();
1669 }
1670
1671 void SpeculativeJIT::compileCurrentBlock()
1672 {
1673     ASSERT(m_compileOkay);
1674     
1675     if (!m_block)
1676         return;
1677     
1678     ASSERT(m_block->isReachable);
1679     
1680     m_jit.blockHeads()[m_block->index] = m_jit.label();
1681
1682     if (!m_block->intersectionOfCFAHasVisited) {
1683         // Don't generate code for basic blocks that are unreachable according to CFA.
1684         // But to be sure that nobody has generated a jump to this block, drop in a
1685         // breakpoint here.
1686         m_jit.abortWithReason(DFGUnreachableBasicBlock);
1687         return;
1688     }
1689
1690     m_stream->appendAndLog(VariableEvent::reset());
1691     
1692     m_jit.jitAssertHasValidCallFrame();
1693     m_jit.jitAssertTagsInPlace();
1694     m_jit.jitAssertArgumentCountSane();
1695
1696     m_state.reset();
1697     m_state.beginBasicBlock(m_block);
1698     
1699     for (size_t i = m_block->variablesAtHead.size(); i--;) {
1700         int operand = m_block->variablesAtHead.operandForIndex(i);
1701         Node* node = m_block->variablesAtHead[i];
1702         if (!node)
1703             continue; // No need to record dead SetLocal's.
1704         
1705         VariableAccessData* variable = node->variableAccessData();
1706         DataFormat format;
1707         if (!node->refCount())
1708             continue; // No need to record dead SetLocal's.
1709         format = dataFormatFor(variable->flushFormat());
1710         m_stream->appendAndLog(
1711             VariableEvent::setLocal(
1712                 VirtualRegister(operand),
1713                 variable->machineLocal(),
1714                 format));
1715     }
1716
1717     m_origin = NodeOrigin();
1718     
1719     for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
1720         m_currentNode = m_block->at(m_indexInBlock);
1721         
1722         // We may have hit a contradiction that the CFA was aware of but that the JIT
1723         // didn't cause directly.
1724         if (!m_state.isValid()) {
1725             bail(DFGBailedAtTopOfBlock);
1726             return;
1727         }
1728
1729         m_interpreter.startExecuting();
1730         m_interpreter.executeKnownEdgeTypes(m_currentNode);
1731         m_jit.setForNode(m_currentNode);
1732         m_origin = m_currentNode->origin;
1733         if (validationEnabled())
1734             m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
1735         m_lastGeneratedNode = m_currentNode->op();
1736         
1737         ASSERT(m_currentNode->shouldGenerate());
1738         
1739         if (verboseCompilationEnabled()) {
1740             dataLogF(
1741                 "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
1742                 (int)m_currentNode->index(),
1743                 m_currentNode->origin.semantic.bytecodeIndex, m_jit.debugOffset());
1744             dataLog("\n");
1745         }
1746
1747         if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
1748             m_jit.jitReleaseAssertNoException(*m_jit.vm());
1749
1750         m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);
1751
1752         compile(m_currentNode);
1753         
1754         if (belongsInMinifiedGraph(m_currentNode->op()))
1755             m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
1756         
1757 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
1758         m_jit.clearRegisterAllocationOffsets();
1759 #endif
1760         
1761         if (!m_compileOkay) {
1762             bail(DFGBailedAtEndOfNode);
1763             return;
1764         }
1765         
1766         // Make sure that the abstract state is rematerialized for the next node.
1767         m_interpreter.executeEffects(m_indexInBlock);
1768     }
1769     
1770     // Perform the most basic verification that children have been used correctly.
1771     if (!ASSERT_DISABLED) {
1772         for (auto& info : m_generationInfo)
1773             RELEASE_ASSERT(!info.alive());
1774     }
1775 }
1776
1777 // If we are making type predictions about our arguments then
1778 // we need to check that they are correct on function entry.
1779 void SpeculativeJIT::checkArgumentTypes()
1780 {
1781     ASSERT(!m_currentNode);
1782     m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
1783
1784     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
1785         Node* node = m_jit.graph().m_arguments[i];
1786         if (!node) {
1787             // The argument is dead. We don't do any checks for such arguments.
1788             continue;
1789         }
1790         
1791         ASSERT(node->op() == SetArgument);
1792         ASSERT(node->shouldGenerate());
1793
1794         VariableAccessData* variableAccessData = node->variableAccessData();
1795         FlushFormat format = variableAccessData->flushFormat();
1796         
1797         if (format == FlushedJSValue)
1798             continue;
1799         
1800         VirtualRegister virtualRegister = variableAccessData->local();
1801
1802         JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
1803         
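             // On 64-bit these checks test the boxed value's bit pattern in the argument's stack
             // slot directly; on 32-bit they compare the slot's tag word against the expected tag.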
1804 #if USE(JSVALUE64)
1805         switch (format) {
1806         case FlushedInt32: {
1807             speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
1808             break;
1809         }
1810         case FlushedBoolean: {
1811             GPRTemporary temp(this);
1812             m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
1813             m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
1814             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1815             break;
1816         }
1817         case FlushedCell: {
1818             speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
1819             break;
1820         }
1821         default:
1822             RELEASE_ASSERT_NOT_REACHED();
1823             break;
1824         }
1825 #else
1826         switch (format) {
1827         case FlushedInt32: {
1828             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
1829             break;
1830         }
1831         case FlushedBoolean: {
1832             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
1833             break;
1834         }
1835         case FlushedCell: {
1836             speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
1837             break;
1838         }
1839         default:
1840             RELEASE_ASSERT_NOT_REACHED();
1841             break;
1842         }
1843 #endif
1844     }
1845
1846     m_origin = NodeOrigin();
1847 }
1848
1849 bool SpeculativeJIT::compile()
1850 {
1851     checkArgumentTypes();
1852     
1853     ASSERT(!m_currentNode);
1854     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1855         m_jit.setForBlockIndex(blockIndex);
1856         m_block = m_jit.graph().block(blockIndex);
1857         compileCurrentBlock();
1858     }
1859     linkBranches();
1860     return true;
1861 }
1862
1863 void SpeculativeJIT::createOSREntries()
1864 {
1865     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1866         BasicBlock* block = m_jit.graph().block(blockIndex);
1867         if (!block)
1868             continue;
1869         if (!block->isOSRTarget)
1870             continue;
1871         
1872         // Currently we don't have OSR entry trampolines. We could add them
1873         // here if need be.
1874         m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
1875     }
1876 }
1877
1878 void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
1879 {
1880     unsigned osrEntryIndex = 0;
1881     for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
1882         BasicBlock* block = m_jit.graph().block(blockIndex);
1883         if (!block)
1884             continue;
1885         if (!block->isOSRTarget)
1886             continue;
1887         m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
1888     }
1889     ASSERT(osrEntryIndex == m_osrEntryHeads.size());
1890     
1891     if (verboseCompilationEnabled()) {
1892         DumpContext dumpContext;
1893         dataLog("OSR Entries:\n");
1894         for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
1895             dataLog("    ", inContext(entryData, &dumpContext), "\n");
1896         if (!dumpContext.isEmpty())
1897             dumpContext.dump(WTF::dataFile());
1898     }
1899 }
1900     
1901 void SpeculativeJIT::compileCheckTraps(Node*)
1902 {
1903     ASSERT(Options::usePollingTraps());
1904     GPRTemporary unused(this);
1905     GPRReg unusedGPR = unused.gpr();
1906
1907     JITCompiler::Jump needTrapHandling = m_jit.branchTest8(JITCompiler::NonZero,
1908         JITCompiler::AbsoluteAddress(m_jit.vm()->needTrapHandlingAddress()));
1909
1910     addSlowPathGenerator(slowPathCall(needTrapHandling, this, operationHandleTraps, unusedGPR));
1911 }
1912
1913 void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
1914 {
1915     Edge child3 = m_jit.graph().varArgChild(node, 2);
1916     Edge child4 = m_jit.graph().varArgChild(node, 3);
1917
1918     ArrayMode arrayMode = node->arrayMode();
1919     
1920     GPRReg baseReg = base.gpr();
1921     GPRReg propertyReg = property.gpr();
1922     
1923     SpeculateDoubleOperand value(this, child3);
1924
1925     FPRReg valueReg = value.fpr();
1926     
1927     DFG_TYPE_CHECK(
1928         JSValueRegs(), child3, SpecFullRealNumber,
1929         m_jit.branchDouble(
1930             MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
1931     
1932     if (!m_compileOkay)
1933         return;
1934     
1935     StorageOperand storage(this, child4);
1936     GPRReg storageReg = storage.gpr();
1937
1938     if (node->op() == PutByValAlias) {
1939         // Store the value to the array.
1940         GPRReg propertyReg = property.gpr();
1941         FPRReg valueReg = value.fpr();
1942         m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1943         
1944         noResult(m_currentNode);
1945         return;
1946     }
1947     
1948     GPRTemporary temporary;
1949     GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
1950
1951     MacroAssembler::Jump slowCase;
1952     
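         // In-bounds array modes only need a check against the public length. Otherwise, a store
         // past the public length but within the vector length bumps the public length to
         // index + 1, and a store past the vector length either OSR exits (when out-of-bounds
         // stores were not predicted) or takes the slow path call added below.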
1953     if (arrayMode.isInBounds()) {
1954         speculationCheck(
1955             OutOfBounds, JSValueRegs(), 0,
1956             m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
1957     } else {
1958         MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1959         
1960         slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
1961         
1962         if (!arrayMode.isOutOfBounds())
1963             speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
1964         
1965         m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
1966         m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
1967         
1968         inBounds.link(&m_jit);
1969     }
1970     
1971     m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
1972
1973     base.use();
1974     property.use();
1975     value.use();
1976     storage.use();
1977     
1978     if (arrayMode.isOutOfBounds()) {
1979         addSlowPathGenerator(
1980             slowPathCall(
1981                 slowCase, this,
1982                 m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
1983                 NoResult, baseReg, propertyReg, valueReg));
1984     }
1985
1986     noResult(m_currentNode, UseChildrenCalledExplicitly);
1987 }
1988
1989 void SpeculativeJIT::compileGetCharCodeAt(Node* node)
1990 {
1991     SpeculateCellOperand string(this, node->child1());
1992     SpeculateStrictInt32Operand index(this, node->child2());
1993     StorageOperand storage(this, node->child3());
1994
1995     GPRReg stringReg = string.gpr();
1996     GPRReg indexReg = index.gpr();
1997     GPRReg storageReg = storage.gpr();
1998     
1999     ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
2000
2001     // unsigned comparison so we can filter out negative indices and indices that are too large
2002     speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
2003
2004     GPRTemporary scratch(this);
2005     GPRReg scratchReg = scratch.gpr();
2006
2007     m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
2008
2009     // Load the character into scratchReg
2010     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2011
2012     m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
2013     JITCompiler::Jump cont8Bit = m_jit.jump();
2014
2015     is16Bit.link(&m_jit);
2016
2017     m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
2018
2019     cont8Bit.link(&m_jit);
2020
2021     int32Result(scratchReg, m_currentNode);
2022 }
2023
2024 void SpeculativeJIT::compileGetByValOnString(Node* node)
2025 {
2026     SpeculateCellOperand base(this, node->child1());
2027     SpeculateStrictInt32Operand property(this, node->child2());
2028     StorageOperand storage(this, node->child3());
2029     GPRReg baseReg = base.gpr();
2030     GPRReg propertyReg = property.gpr();
2031     GPRReg storageReg = storage.gpr();
2032
2033     GPRTemporary scratch(this);
2034     GPRReg scratchReg = scratch.gpr();
2035 #if USE(JSVALUE32_64)
2036     GPRTemporary resultTag;
2037     GPRReg resultTagReg = InvalidGPRReg;
2038     if (node->arrayMode().isOutOfBounds()) {
2039         GPRTemporary realResultTag(this);
2040         resultTag.adopt(realResultTag);
2041         resultTagReg = resultTag.gpr();
2042     }
2043 #endif
2044
2045     ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2046
2047     // unsigned comparison so we can filter out negative indices and indices that are too large
2048     JITCompiler::Jump outOfBounds = m_jit.branch32(
2049         MacroAssembler::AboveOrEqual, propertyReg,
2050         MacroAssembler::Address(baseReg, JSString::offsetOfLength()));
2051     if (node->arrayMode().isInBounds())
2052         speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
2053
2054     m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
2055
2056     // Load the character into scratchReg
2057     JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
2058
2059     m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
2060     JITCompiler::Jump cont8Bit = m_jit.jump();
2061
2062     is16Bit.link(&m_jit);
2063
2064     m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
2065
2066     JITCompiler::Jump bigCharacter =
2067         m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100));
2068
2069     // 8 bit string values don't need the isASCII check.
2070     cont8Bit.link(&m_jit);
2071
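         // scratchReg now holds the character code. Codes of 0x100 and above were diverted to the
         // operationSingleCharacterString slow path above; the rest are turned into JSString cells
         // by indexing the VM's single-character string table with a pointer-sized scale.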
2072     m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
2073     m_jit.addPtr(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), scratchReg);
2074     m_jit.loadPtr(scratchReg, scratchReg);
2075
2076     addSlowPathGenerator(
2077         slowPathCall(
2078             bigCharacter, this, operationSingleCharacterString, scratchReg, scratchReg));
2079
2080     if (node->arrayMode().isOutOfBounds()) {
2081 #if USE(JSVALUE32_64)
2082         m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
2083 #endif
2084
2085         JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
2086         bool prototypeChainIsSane = false;
2087         if (globalObject->stringPrototypeChainIsSane()) {
2088             // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
2089             // loads return a trivial value". Something like SaneChainOutOfBounds. This should
2090             // speculate that we don't take negative out-of-bounds, or better yet, it should rely
2091             // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
2092             // indexed properties either.
2093             // https://bugs.webkit.org/show_bug.cgi?id=144668
2094             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2095             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2096             prototypeChainIsSane = globalObject->stringPrototypeChainIsSane();
2097         }
2098         if (prototypeChainIsSane) {
2099             m_jit.graph().watchpoints().addLazily(globalObject->stringPrototype()->structure()->transitionWatchpointSet());
2100             m_jit.graph().watchpoints().addLazily(globalObject->objectPrototype()->structure()->transitionWatchpointSet());
2101             
2102 #if USE(JSVALUE64)
2103             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2104                 outOfBounds, this, JSValueRegs(scratchReg), baseReg, propertyReg));
2105 #else
2106             addSlowPathGenerator(std::make_unique<SaneStringGetByValSlowPathGenerator>(
2107                 outOfBounds, this, JSValueRegs(resultTagReg, scratchReg),
2108                 baseReg, propertyReg));
2109 #endif
2110         } else {
2111 #if USE(JSVALUE64)
2112             addSlowPathGenerator(
2113                 slowPathCall(
2114                     outOfBounds, this, operationGetByValStringInt,
2115                     scratchReg, baseReg, propertyReg));
2116 #else
2117             addSlowPathGenerator(
2118                 slowPathCall(
2119                     outOfBounds, this, operationGetByValStringInt,
2120                     JSValueRegs(resultTagReg, scratchReg), baseReg, propertyReg));
2121 #endif
2122         }
2123         
2124 #if USE(JSVALUE64)
2125         jsValueResult(scratchReg, m_currentNode);
2126 #else
2127         jsValueResult(resultTagReg, scratchReg, m_currentNode);
2128 #endif
2129     } else
2130         cellResult(scratchReg, m_currentNode);
2131 }
2132
2133 void SpeculativeJIT::compileFromCharCode(Node* node)
2134 {
2135     Edge& child = node->child1();
2136     if (child.useKind() == UntypedUse) {
2137         JSValueOperand opr(this, child);
2138         JSValueRegs oprRegs = opr.jsValueRegs();
2139 #if USE(JSVALUE64)
2140         GPRTemporary result(this);
2141         JSValueRegs resultRegs = JSValueRegs(result.gpr());
2142 #else
2143         GPRTemporary resultTag(this);
2144         GPRTemporary resultPayload(this);
2145         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
2146 #endif
2147         flushRegisters();
2148         callOperation(operationStringFromCharCodeUntyped, resultRegs, oprRegs);
2149         m_jit.exceptionCheck();
2150         
2151         jsValueResult(resultRegs, node);
2152         return;
2153     }
2154
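         // Int32 fast path: character codes below 0xff are looked up in the VM's single-character
         // string table; larger codes, or a null table entry, fall back to
         // operationStringFromCharCode.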
2155     SpeculateStrictInt32Operand property(this, child);
2156     GPRReg propertyReg = property.gpr();
2157     GPRTemporary smallStrings(this);
2158     GPRTemporary scratch(this);
2159     GPRReg scratchReg = scratch.gpr();
2160     GPRReg smallStringsReg = smallStrings.gpr();
2161
2162     JITCompiler::JumpList slowCases;
2163     slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
2164     m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
2165     m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
2166
2167     slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
2168     addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
2169     cellResult(scratchReg, m_currentNode);
2170 }
2171
2172 GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2173 {
2174     VirtualRegister virtualRegister = node->virtualRegister();
2175     GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2176
2177     switch (info.registerFormat()) {
2178     case DataFormatStorage:
2179         RELEASE_ASSERT_NOT_REACHED();
2180
2181     case DataFormatBoolean:
2182     case DataFormatCell:
2183         terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2184         return GeneratedOperandTypeUnknown;
2185
2186     case DataFormatNone:
2187     case DataFormatJSCell:
2188     case DataFormatJS:
2189     case DataFormatJSBoolean:
2190     case DataFormatJSDouble:
2191         return GeneratedOperandJSValue;
2192
2193     case DataFormatJSInt32:
2194     case DataFormatInt32:
2195         return GeneratedOperandInteger;
2196
2197     default:
2198         RELEASE_ASSERT_NOT_REACHED();
2199         return GeneratedOperandTypeUnknown;
2200     }
2201 }
2202
2203 void SpeculativeJIT::compileValueToInt32(Node* node)
2204 {
2205     switch (node->child1().useKind()) {
2206 #if USE(JSVALUE64)
2207     case Int52RepUse: {
2208         SpeculateStrictInt52Operand op1(this, node->child1());
2209         GPRTemporary result(this, Reuse, op1);
2210         GPRReg op1GPR = op1.gpr();
2211         GPRReg resultGPR = result.gpr();
2212         m_jit.zeroExtend32ToPtr(op1GPR, resultGPR);
2213         int32Result(resultGPR, node, DataFormatInt32);
2214         return;
2215     }
2216 #endif // USE(JSVALUE64)
2217         
2218     case DoubleRepUse: {
2219         GPRTemporary result(this);
2220         SpeculateDoubleOperand op1(this, node->child1());
2221         FPRReg fpr = op1.fpr();
2222         GPRReg gpr = result.gpr();
2223         JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
2224         
2225         addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this,
2226             hasSensibleDoubleToInt() ? operationToInt32SensibleSlow : operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
2227         
2228         int32Result(gpr, node);
2229         return;
2230     }
2231     
2232     case NumberUse:
2233     case NotCellUse: {
2234         switch (checkGeneratedTypeForToInt32(node->child1().node())) {
2235         case GeneratedOperandInteger: {
2236             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2237             GPRTemporary result(this, Reuse, op1);
2238             m_jit.move(op1.gpr(), result.gpr());
2239             int32Result(result.gpr(), node, op1.format());
2240             return;
2241         }
2242         case GeneratedOperandJSValue: {
2243             GPRTemporary result(this);
2244 #if USE(JSVALUE64)
2245             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2246
2247             GPRReg gpr = op1.gpr();
2248             GPRReg resultGpr = result.gpr();
2249             FPRTemporary tempFpr(this);
2250             FPRReg fpr = tempFpr.fpr();
2251
2252             JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
2253             JITCompiler::JumpList converted;
2254
2255             if (node->child1().useKind() == NumberUse) {
2256                 DFG_TYPE_CHECK(
2257                     JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
2258                     m_jit.branchTest64(
2259                         MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
2260             } else {
2261                 JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
2262                 
2263                 DFG_TYPE_CHECK(
2264                     JSValueRegs(gpr), node->child1(), ~SpecCell, m_jit.branchIfCell(JSValueRegs(gpr)));
2265                 
2266                 // It's not a cell: so true turns into 1 and all else turns into 0.
2267                 m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
2268                 converted.append(m_jit.jump());
2269                 
2270                 isNumber.link(&m_jit);
2271             }
2272
2273             // If we get here, we have a double encoded as a JSValue.
2274             unboxDouble(gpr, resultGpr, fpr);
2275
2276             silentSpillAllRegisters(resultGpr);
2277             callOperation(operationToInt32, resultGpr, fpr);
2278             silentFillAllRegisters(resultGpr);
2279
2280             converted.append(m_jit.jump());
2281
2282             isInteger.link(&m_jit);
2283             m_jit.zeroExtend32ToPtr(gpr, resultGpr);
2284
2285             converted.link(&m_jit);
2286 #else
2287             Node* childNode = node->child1().node();
2288             VirtualRegister virtualRegister = childNode->virtualRegister();
2289             GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2290
2291             JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2292
2293             GPRReg payloadGPR = op1.payloadGPR();
2294             GPRReg resultGpr = result.gpr();
2295         
2296             JITCompiler::JumpList converted;
2297
2298             if (info.registerFormat() == DataFormatJSInt32)
2299                 m_jit.move(payloadGPR, resultGpr);
2300             else {
2301                 GPRReg tagGPR = op1.tagGPR();
2302                 FPRTemporary tempFpr(this);
2303                 FPRReg fpr = tempFpr.fpr();
2304                 FPRTemporary scratch(this);
2305
2306                 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
2307
2308                 if (node->child1().useKind() == NumberUse) {
2309                     DFG_TYPE_CHECK(
2310                         op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
2311                         m_jit.branch32(
2312                             MacroAssembler::AboveOrEqual, tagGPR,
2313                             TrustedImm32(JSValue::LowestTag)));
2314                 } else {
2315                     JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
2316                     
2317                     DFG_TYPE_CHECK(
2318                         op1.jsValueRegs(), node->child1(), ~SpecCell,
2319                         m_jit.branchIfCell(op1.jsValueRegs()));
2320                     
2321                     // It's not a cell: so true turns into 1 and all else turns into 0.
2322                     JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
2323                     m_jit.move(TrustedImm32(0), resultGpr);
2324                     converted.append(m_jit.jump());
2325                     
2326                     isBoolean.link(&m_jit);
2327                     m_jit.move(payloadGPR, resultGpr);
2328                     converted.append(m_jit.jump());
2329                     
2330                     isNumber.link(&m_jit);
2331                 }
2332
2333                 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
2334
2335                 silentSpillAllRegisters(resultGpr);
2336                 callOperation(operationToInt32, resultGpr, fpr);
2337                 silentFillAllRegisters(resultGpr);
2338
2339                 converted.append(m_jit.jump());
2340
2341                 isInteger.link(&m_jit);
2342                 m_jit.move(payloadGPR, resultGpr);
2343
2344                 converted.link(&m_jit);
2345             }
2346 #endif
2347             int32Result(resultGpr, node);
2348             return;
2349         }
2350         case GeneratedOperandTypeUnknown:
2351             RELEASE_ASSERT(!m_compileOkay);
2352             return;
2353         }
2354         RELEASE_ASSERT_NOT_REACHED();
2355         return;
2356     }
2357     
2358     default:
2359         ASSERT(!m_compileOkay);
2360         return;
2361     }
2362 }
2363
2364 void SpeculativeJIT::compileUInt32ToNumber(Node* node)
2365 {
2366     if (doesOverflow(node->arithMode())) {
2367         if (enableInt52()) {
2368             SpeculateInt32Operand op1(this, node->child1());
2369             GPRTemporary result(this, Reuse, op1);
2370             m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
2371             strictInt52Result(result.gpr(), node);
2372             return;
2373         }
2374         SpeculateInt32Operand op1(this, node->child1());
2375         FPRTemporary result(this);
2376             
2377         GPRReg inputGPR = op1.gpr();
2378         FPRReg outputFPR = result.fpr();
2379             
2380         m_jit.convertInt32ToDouble(inputGPR, outputFPR);
2381             
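             // convertInt32ToDouble treated the input as signed, so if the sign bit was set the
             // result is exactly 2^32 too small; add 2^32 back for negative inputs.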
2382         JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
2383         m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
2384         positive.link(&m_jit);
2385             
2386         doubleResult(outputFPR, node);
2387         return;
2388     }
2389     
2390     RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);
2391
2392     SpeculateInt32Operand op1(this, node->child1());
2393     GPRTemporary result(this);
2394
2395     m_jit.move(op1.gpr(), result.gpr());
2396
2397     speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));
2398
2399     int32Result(result.gpr(), node, op1.format());
2400 }
2401
2402 void SpeculativeJIT::compileDoubleAsInt32(Node* node)
2403 {
2404     SpeculateDoubleOperand op1(this, node->child1());
2405     FPRTemporary scratch(this);
2406     GPRTemporary result(this);
2407     
2408     FPRReg valueFPR = op1.fpr();
2409     FPRReg scratchFPR = scratch.fpr();
2410     GPRReg resultGPR = result.gpr();
2411
2412     JITCompiler::JumpList failureCases;
2413     RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
2414     m_jit.branchConvertDoubleToInt32(
2415         valueFPR, resultGPR, failureCases, scratchFPR,
2416         shouldCheckNegativeZero(node->arithMode()));
2417     speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
2418
2419     int32Result(resultGPR, node);
2420 }
2421
2422 void SpeculativeJIT::compileDoubleRep(Node* node)
2423 {
2424     switch (node->child1().useKind()) {
2425     case RealNumberUse: {
2426         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2427         FPRTemporary result(this);
2428         
2429         JSValueRegs op1Regs = op1.jsValueRegs();
2430         FPRReg resultFPR = result.fpr();
2431         
2432 #if USE(JSVALUE64)
2433         GPRTemporary temp(this);
2434         GPRReg tempGPR = temp.gpr();
2435         m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
2436 #else
2437         FPRTemporary temp(this);
2438         FPRReg tempFPR = temp.fpr();
2439         unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
2440 #endif
2441         
2442         JITCompiler::Jump done = m_jit.branchDouble(
2443             JITCompiler::DoubleEqual, resultFPR, resultFPR);
2444         
2445         DFG_TYPE_CHECK(
2446             op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
2447         m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
2448         
2449         done.link(&m_jit);
2450         
2451         doubleResult(resultFPR, node);
2452         return;
2453     }
2454     
2455     case NotCellUse:
2456     case NumberUse: {
2457         ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
2458
2459         SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
2460         if (isInt32Speculation(possibleTypes)) {
2461             SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
2462             FPRTemporary result(this);
2463             m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
2464             doubleResult(result.fpr(), node);
2465             return;
2466         }
2467
2468         JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2469         FPRTemporary result(this);
2470
2471 #if USE(JSVALUE64)
2472         GPRTemporary temp(this);
2473
2474         GPRReg op1GPR = op1.gpr();
2475         GPRReg tempGPR = temp.gpr();
2476         FPRReg resultFPR = result.fpr();
2477         JITCompiler::JumpList done;
2478
2479         JITCompiler::Jump isInteger = m_jit.branch64(
2480             MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
2481
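             // For NotCellUse the non-number primitives are converted inline: null and false
             // become 0, true becomes 1, and undefined becomes NaN; a cell fails the type check.
             // For NumberUse anything that is not already a number fails the type check.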
2482         if (node->child1().useKind() == NotCellUse) {
2483             JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
2484             JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
2485
2486             static const double zero = 0;
2487             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2488
2489             JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
2490             done.append(isNull);
2491
2492             DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
2493                 m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
2494
2495             JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
2496             static const double one = 1;
2497             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2498             done.append(m_jit.jump());
2499             done.append(isFalse);
2500
2501             isUndefined.link(&m_jit);
2502             static const double NaN = PNaN;
2503             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2504             done.append(m_jit.jump());
2505
2506             isNumber.link(&m_jit);
2507         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2508             typeCheck(
2509                 JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
2510                 m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
2511         }
2512
2513         unboxDouble(op1GPR, tempGPR, resultFPR);
2514         done.append(m_jit.jump());
2515     
2516         isInteger.link(&m_jit);
2517         m_jit.convertInt32ToDouble(op1GPR, resultFPR);
2518         done.link(&m_jit);
2519 #else // USE(JSVALUE64) -> this is the 32_64 case
2520         FPRTemporary temp(this);
2521     
2522         GPRReg op1TagGPR = op1.tagGPR();
2523         GPRReg op1PayloadGPR = op1.payloadGPR();
2524         FPRReg tempFPR = temp.fpr();
2525         FPRReg resultFPR = result.fpr();
2526         JITCompiler::JumpList done;
2527     
2528         JITCompiler::Jump isInteger = m_jit.branch32(
2529             MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
2530
2531         if (node->child1().useKind() == NotCellUse) {
2532             JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
2533             JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
2534
2535             static const double zero = 0;
2536             m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);
2537
2538             JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
2539             done.append(isNull);
2540
2541             DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
2542
2543             JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
2544             static const double one = 1;
2545             m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
2546             done.append(m_jit.jump());
2547             done.append(isFalse);
2548
2549             isUndefined.link(&m_jit);
2550             static const double NaN = PNaN;
2551             m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
2552             done.append(m_jit.jump());
2553
2554             isNumber.link(&m_jit);
2555         } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
2556             typeCheck(
2557                 JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
2558                 m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
2559         }
2560
2561         unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
2562         done.append(m_jit.jump());
2563     
2564         isInteger.link(&m_jit);
2565         m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
2566         done.link(&m_jit);
2567 #endif // USE(JSVALUE64)
2568     
2569         doubleResult(resultFPR, node);
2570         return;
2571     }
2572         
2573 #if USE(JSVALUE64)
2574     case Int52RepUse: {
2575         SpeculateStrictInt52Operand value(this, node->child1());
2576         FPRTemporary result(this);
2577         
2578         GPRReg valueGPR = value.gpr();
2579         FPRReg resultFPR = result.fpr();
2580
2581         m_jit.convertInt64ToDouble(valueGPR, resultFPR);
2582         
2583         doubleResult(resultFPR, node);
2584         return;
2585     }
2586 #endif // USE(JSVALUE64)
2587         
2588     default:
2589         RELEASE_ASSERT_NOT_REACHED();
2590         return;
2591     }
2592 }
2593
2594 void SpeculativeJIT::compileValueRep(Node* node)
2595 {
2596     switch (node->child1().useKind()) {
2597     case DoubleRepUse: {
2598         SpeculateDoubleOperand value(this, node->child1());
2599         JSValueRegsTemporary result(this);
2600         
2601         FPRReg valueFPR = value.fpr();
2602         JSValueRegs resultRegs = result.regs();
2603         
2604         // It's very tempting to in-place filter the value to indicate that it's not impure NaN
2605         // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
2606         // subject to a prior SetLocal, filtering the value would imply that the corresponding
2607         // local was purified.
2608         if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
2609             m_jit.purifyNaN(valueFPR);
2610
2611         boxDouble(valueFPR, resultRegs);
2612         
2613         jsValueResult(resultRegs, node);
2614         return;
2615     }
2616         
2617 #if USE(JSVALUE64)
2618     case Int52RepUse: {
2619         SpeculateStrictInt52Operand value(this, node->child1());
2620         GPRTemporary result(this);
2621         
2622         GPRReg valueGPR = value.gpr();
2623         GPRReg resultGPR = result.gpr();
2624         
2625         boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);
2626         
2627         jsValueResult(resultGPR, node);
2628         return;
2629     }
2630 #endif // USE(JSVALUE64)
2631         
2632     default:
2633         RELEASE_ASSERT_NOT_REACHED();
2634         return;
2635     }
2636 }
2637
2638 static double clampDoubleToByte(double d)
2639 {
2640     d += 0.5;
2641     if (!(d > 0))
2642         d = 0;
2643     else if (d > 255)
2644         d = 255;
2645     return d;
2646 }
2647
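     // Clamp an int32 in place to the 0..255 range used by clamped typed array stores: values
     // already in range pass straight through, negative values become 0, and values above 255
     // become 255.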
2648 static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
2649 {
2650     MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
2651     MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
2652     jit.xorPtr(result, result);
2653     MacroAssembler::Jump clamped = jit.jump();
2654     tooBig.link(&jit);
2655     jit.move(JITCompiler::TrustedImm32(255), result);
2656     clamped.link(&jit);
2657     inBounds.link(&jit);
2658 }
2659
2660 static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
2661 {
2662     // Unordered compare so we pick up NaN
2663     static const double zero = 0;
2664     static const double byteMax = 255;
2665     static const double half = 0.5;
2666     jit.loadDouble(JITCompiler::TrustedImmPtr(&zero), scratch);
2667     MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
2668     jit.loadDouble(JITCompiler::TrustedImmPtr(&byteMax), scratch);
2669     MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
2670     
2671     jit.loadDouble(JITCompiler::TrustedImmPtr(&half), scratch);
2672     // FIXME: This should probably just use a floating point round!
2673     // https://bugs.webkit.org/show_bug.cgi?id=72054
2674     jit.addDouble(source, scratch);
2675     jit.truncateDoubleToInt32(scratch, result);   
2676     MacroAssembler::Jump truncatedInt = jit.jump();
2677     
2678     tooSmall.link(&jit);
2679     jit.xorPtr(result, result);
2680     MacroAssembler::Jump zeroed = jit.jump();
2681     
2682     tooBig.link(&jit);
2683     jit.move(JITCompiler::TrustedImm32(255), result);
2684     
2685     truncatedInt.link(&jit);
2686     zeroed.link(&jit);
2687
2688 }
2689
2690 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2691 {
2692     if (node->op() == PutByValAlias)
2693         return JITCompiler::Jump();
2694     JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
2695         m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
2696     if (view) {
2697         uint32_t length = view->length();
2698         Node* indexNode = m_jit.graph().child(node, 1).node();
2699         if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
2700             return JITCompiler::Jump();
2701         return m_jit.branch32(
2702             MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
2703     }
2704     return m_jit.branch32(
2705         MacroAssembler::AboveOrEqual, indexGPR,
2706         MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
2707 }
2708
2709 void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
2710 {
2711     JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
2712     if (!jump.isSet())
2713         return;
2714     speculationCheck(OutOfBounds, JSValueRegs(), 0, jump);
2715 }
2716
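     // For typed array accesses that are allowed to go out of bounds, an out-of-bounds index only
     // forces an OSR exit when the view is a WastefulTypedArray whose vector pointer is null (its
     // backing buffer has been neutered). In-bounds array modes exit on any out-of-bounds index.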
2717 JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsNeuteredIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
2718 {
2719     JITCompiler::Jump done;
2720     if (outOfBounds.isSet()) {
2721         done = m_jit.jump();
2722         if (node->arrayMode().isInBounds())
2723             speculationCheck(OutOfBounds, JSValueSource(), 0, outOfBounds);
2724         else {
2725             outOfBounds.link(&m_jit);
2726
2727             JITCompiler::Jump notWasteful = m_jit.branch32(
2728                 MacroAssembler::NotEqual,
2729                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
2730                 TrustedImm32(WastefulTypedArray));
2731
2732             JITCompiler::Jump hasNullVector = m_jit.branchTestPtr(
2733                 MacroAssembler::Zero,
2734                 MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
2735             speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
2736             notWasteful.link(&m_jit);
2737         }
2738     }
2739     return done;
2740 }
2741
2742 void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
2743 {
2744     ASSERT(isInt(type));
2745     
2746     SpeculateCellOperand base(this, node->child1());
2747     SpeculateStrictInt32Operand property(this, node->child2());
2748     StorageOperand storage(this, node->child3());
2749
2750     GPRReg baseReg = base.gpr();
2751     GPRReg propertyReg = property.gpr();
2752     GPRReg storageReg = storage.gpr();
2753
2754     GPRTemporary result(this);
2755     GPRReg resultReg = result.gpr();
2756
2757     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
2758
2759     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
2760     switch (elementSize(type)) {
2761     case 1:
2762         if (isSigned(type))
2763             m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2764         else
2765             m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
2766         break;
2767     case 2:
2768         if (isSigned(type))
2769             m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2770         else
2771             m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
2772         break;
2773     case 4:
2774         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
2775         break;
2776     default:
2777         CRASH();
2778     }
2779     if (elementSize(type) < 4 || isSigned(type)) {
2780         int32Result(resultReg, node);
2781         return;
2782     }
2783     
2784     ASSERT(elementSize(type) == 4 && !isSigned(type));
2785     if (node->shouldSpeculateInt32()) {
2786         speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
2787         int32Result(resultReg, node);
2788         return;
2789     }
2790     
2791 #if USE(JSVALUE64)
2792     if (node->shouldSpeculateAnyInt()) {
2793         m_jit.zeroExtend32ToPtr(resultReg, resultReg);
2794         strictInt52Result(resultReg, node);
2795         return;
2796     }
2797 #endif
2798     
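         // The loaded value is an unsigned 32-bit int that may not fit in a signed int32.
         // Convert it as if signed, then add 2^32 to correct the result when the sign bit was set.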
2799     FPRTemporary fresult(this);
2800     m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
2801     JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
2802     m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
2803     positive.link(&m_jit);
2804     doubleResult(fresult.fpr(), node);
2805 }
2806
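     // Stores an integer value into an int typed array. The value may come from a constant, an
     // Int32, an Int52 (64-bit only), or a double; clamped (Uint8Clamped) arrays clamp the value
     // to [0, 255]. Doubles that cannot be truncated to int32 on the fast path are boxed and
     // handed to the generic put-by-val operation via the slow-path cases.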
2807 void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
2808 {
2809     ASSERT(isInt(type));
2810     
2811     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
2812     GPRReg storageReg = storage.gpr();
2813     
2814     Edge valueUse = m_jit.graph().varArgChild(node, 2);
2815     
2816     GPRTemporary value;
2817 #if USE(JSVALUE32_64)
2818     GPRTemporary propertyTag;
2819     GPRTemporary valueTag;
2820 #endif
2821
2822     GPRReg valueGPR = InvalidGPRReg;
2823 #if USE(JSVALUE32_64)
2824     GPRReg propertyTagGPR = InvalidGPRReg;
2825     GPRReg valueTagGPR = InvalidGPRReg;
2826 #endif
2827
2828     JITCompiler::JumpList slowPathCases;
2829     
2830     bool isAppropriateConstant = false;
2831     if (valueUse->isConstant()) {
2832         JSValue jsValue = valueUse->asJSValue();
2833         SpeculatedType expectedType = typeFilterFor(valueUse.useKind());
2834         SpeculatedType actualType = speculationFromValue(jsValue);
2835         isAppropriateConstant = (expectedType | actualType) == expectedType;
2836     }
2837
2838     if (isAppropriateConstant) {
2839         JSValue jsValue = valueUse->asJSValue();
2840         if (!jsValue.isNumber()) {
2841             terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
2842             noResult(node);
2843             return;
2844         }
2845         double d = jsValue.asNumber();
2846         if (isClamped(type)) {
2847             ASSERT(elementSize(type) == 1);
2848             d = clampDoubleToByte(d);
2849         }
2850         GPRTemporary scratch(this);
2851         GPRReg scratchReg = scratch.gpr();
2852         m_jit.move(Imm32(toInt32(d)), scratchReg);
2853         value.adopt(scratch);
2854         valueGPR = scratchReg;
2855     } else {
2856         switch (valueUse.useKind()) {
2857         case Int32Use: {
2858             SpeculateInt32Operand valueOp(this, valueUse);
2859             GPRTemporary scratch(this);
2860             GPRReg scratchReg = scratch.gpr();
2861             m_jit.move(valueOp.gpr(), scratchReg);
2862             if (isClamped(type)) {
2863                 ASSERT(elementSize(type) == 1);
2864                 compileClampIntegerToByte(m_jit, scratchReg);
2865             }
2866             value.adopt(scratch);
2867             valueGPR = scratchReg;
2868             break;
2869         }
2870             
2871 #if USE(JSVALUE64)
2872         case Int52RepUse: {
2873             SpeculateStrictInt52Operand valueOp(this, valueUse);
2874             GPRTemporary scratch(this);
2875             GPRReg scratchReg = scratch.gpr();
2876             m_jit.move(valueOp.gpr(), scratchReg);
2877             if (isClamped(type)) {
2878                 ASSERT(elementSize(type) == 1);
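                     // Clamp the Int52 value to [0, 255]: values already in range fall through,
                     // anything greater than 255 becomes 255, and negative values become 0.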
2879                 MacroAssembler::Jump inBounds = m_jit.branch64(
2880                     MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
2881                 MacroAssembler::Jump tooBig = m_jit.branch64(
2882                     MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
2883                 m_jit.move(TrustedImm32(0), scratchReg);
2884                 MacroAssembler::Jump clamped = m_jit.jump();
2885                 tooBig.link(&m_jit);
2886                 m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
2887                 clamped.link(&m_jit);
2888                 inBounds.link(&m_jit);
2889             }
2890             value.adopt(scratch);
2891             valueGPR = scratchReg;
2892             break;
2893         }
2894 #endif // USE(JSVALUE64)
2895             
2896         case DoubleRepUse: {
2897             if (isClamped(type)) {
2898                 ASSERT(elementSize(type) == 1);
2899                 SpeculateDoubleOperand valueOp(this, valueUse);
2900                 GPRTemporary result(this);
2901                 FPRTemporary floatScratch(this);
2902                 FPRReg fpr = valueOp.fpr();
2903                 GPRReg gpr = result.gpr();
2904                 compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
2905                 value.adopt(result);
2906                 valueGPR = gpr;
2907             } else {
2908 #if USE(JSVALUE32_64)
2909                 GPRTemporary realPropertyTag(this);
2910                 propertyTag.adopt(realPropertyTag);
2911                 propertyTagGPR = propertyTag.gpr();
2912
2913                 GPRTemporary realValueTag(this);
2914                 valueTag.adopt(realValueTag);
2915                 valueTagGPR = valueTag.gpr();
2916 #endif
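                     // Fast path: a NaN value stores 0; otherwise attempt to truncate the double
                     // to an int32. If truncation fails, box the double (and tag the int32
                     // property as a JSValue) and fall through to the slow-path call emitted below.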
2917                 SpeculateDoubleOperand valueOp(this, valueUse);
2918                 GPRTemporary result(this);
2919                 FPRReg fpr = valueOp.fpr();
2920                 GPRReg gpr = result.gpr();
2921                 MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
2922                 m_jit.xorPtr(gpr, gpr);
2923                 MacroAssembler::JumpList fixed(m_jit.jump());
2924                 notNaN.link(&m_jit);
2925
2926                 fixed.append(m_jit.branchTruncateDoubleToInt32(
2927                     fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));
2928
2929 #if USE(JSVALUE64)
2930                 m_jit.or64(GPRInfo::tagTypeNumberRegister, property);
2931                 boxDouble(fpr, gpr);
2932 #else
2933                 m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
2934                 boxDouble(fpr, valueTagGPR, gpr);
2935 #endif
2936                 slowPathCases.append(m_jit.jump());
2937
2938                 fixed.link(&m_jit);
2939                 value.adopt(result);
2940                 valueGPR = gpr;
2941             }
2942             break;
2943         }
2944             
2945         default:
2946             RELEASE_ASSERT_NOT_REACHED();
2947             break;
2948         }
2949     }
2950     
2951     ASSERT_UNUSED(valueGPR, valueGPR != property);
2952     ASSERT(valueGPR != base);
2953     ASSERT(valueGPR != storageReg);
2954     JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
2955
2956     switch (elementSize(type)) {
2957     case 1:
2958         m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
2959         break;
2960     case 2:
2961         m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
2962         break;
2963     case 4:
2964         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
2965         break;
2966     default:
2967         CRASH();
2968     }
2969
2970     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
2971     if (done.isSet())
2972         done.link(&m_jit);
2973
2974     if (!slowPathCases.empty()) {
2975 #if USE(JSVALUE64)
2976         if (node->op() == PutByValDirect) {
2977             addSlowPathGenerator(slowPathCall(
2978                 slowPathCases, this,
2979                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
2980                 NoResult, base, property, valueGPR));
2981         } else {
2982             addSlowPathGenerator(slowPathCall(
2983                 slowPathCases, this,
2984                 m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict,
2985                 NoResult, base, property, valueGPR));
2986         }
2987 #else // not USE(JSVALUE64)
2988         if (node->op() == PutByValDirect) {
2989             addSlowPathGenerator(slowPathCall(
2990                 slowPathCases, this,
2991                 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
2992                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
2993         } else {
2994             addSlowPathGenerator(slowPathCall(
2995                 slowPathCases, this,
2996                 m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
2997                 NoResult, base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
2998         }
2999 #endif
3000     }
3001     noResult(node);
3002 }
3003
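     // Loads from a float typed array. Float32 elements are widened to double; Float64 elements
     // are loaded directly. The result is always a double.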
3004 void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3005 {
3006     ASSERT(isFloat(type));
3007     
3008     SpeculateCellOperand base(this, node->child1());
3009     SpeculateStrictInt32Operand property(this, node->child2());
3010     StorageOperand storage(this, node->child3());
3011
3012     GPRReg baseReg = base.gpr();
3013     GPRReg propertyReg = property.gpr();
3014     GPRReg storageReg = storage.gpr();
3015
3016     ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
3017
3018     FPRTemporary result(this);
3019     FPRReg resultReg = result.fpr();
3020     emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3021     switch (elementSize(type)) {
3022     case 4:
3023         m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3024         m_jit.convertFloatToDouble(resultReg, resultReg);
3025         break;
3026     case 8: {
3027         m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3028         break;
3029     }
3030     default:
3031         RELEASE_ASSERT_NOT_REACHED();
3032     }
3033     
3034     doubleResult(resultReg, node);
3035 }
3036
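     // Stores into a float typed array. The incoming double is narrowed to float for Float32
     // arrays and stored as-is for Float64 arrays; out-of-bounds stores follow the same
     // neuter-check policy as the integer case above.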
3037 void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3038 {
3039     ASSERT(isFloat(type));
3040     
3041     StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3042     GPRReg storageReg = storage.gpr();
3043     
3044     Edge baseUse = m_jit.graph().varArgChild(node, 0);
3045     Edge valueUse = m_jit.graph().varArgChild(node, 2);
3046
3047     SpeculateDoubleOperand valueOp(this, valueUse);
3048     FPRTemporary scratch(this);
3049     FPRReg valueFPR = valueOp.fpr();
3050     FPRReg scratchFPR = scratch.fpr();
3051
3052     ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
3053     
3054     MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3055     
3056     switch (elementSize(type)) {
3057     case 4: {
3058         m_jit.moveDouble(valueFPR, scratchFPR);
3059         m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3060         m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3061         break;
3062     }
3063     case 8:
3064         m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3065         break;
3066     default:
3067         RELEASE_ASSERT_NOT_REACHED();
3068     }
3069
3070     JITCompiler::Jump done = jumpForTypedArrayIsNeuteredIfOutOfBounds(node, base, outOfBounds);
3071     if (done.isSet())
3072         done.link(&m_jit);
3073     noResult(node);
3074 }
3075
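     // Emits the prototype-chain walk for instanceof when the value is known to be a cell. The
     // result is left in scratchReg as a boxed boolean on 64-bit and as a raw 0/1 payload on
     // 32-bit; proxy objects bail out to operationDefaultHasInstance.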
3076 void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg, GPRReg scratch2Reg)
3077 {
3078     // Check that prototype is an object.
3079     speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(prototypeReg));
3080     
3081     // Initialize scratchReg with the value being checked.
3082     m_jit.move(valueReg, scratchReg);
3083     
3084     // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
3085     MacroAssembler::Label loop(&m_jit);
3086     MacroAssembler::Jump performDefaultHasInstance = m_jit.branch8(MacroAssembler::Equal,
3087         MacroAssembler::Address(scratchReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType));
3088     m_jit.emitLoadStructure(*m_jit.vm(), scratchReg, scratchReg, scratch2Reg);
3089     m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + CellPayloadOffset), scratchReg);
3090     MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
3091 #if USE(JSVALUE64)
3092     m_jit.branchIfCell(JSValueRegs(scratchReg)).linkTo(loop, &m_jit);
3093 #else
3094     m_jit.branchTestPtr(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
3095 #endif
3096     
3097     // No match - result is false.
3098 #if USE(JSVALUE64)
3099     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
3100 #else
3101     m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
3102 #endif
3103     MacroAssembler::JumpList doneJumps; 
3104     doneJumps.append(m_jit.jump());
3105
3106     performDefaultHasInstance.link(&m_jit);
3107     silentSpillAllRegisters(scratchReg);
3108     callOperation(operationDefaultHasInstance, scratchReg, valueReg, prototypeReg); 
3109     silentFillAllRegisters(scratchReg);
3110     m_jit.exceptionCheck();
3111 #if USE(JSVALUE64)
3112     m_jit.or32(TrustedImm32(ValueFalse), scratchReg);
3113 #endif
3114     doneJumps.append(m_jit.jump());
3115     
3116     isInstance.link(&m_jit);
3117 #if USE(JSVALUE64)
3118     m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
3119 #else
3120     m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
3121 #endif
3122     
3123     doneJumps.link(&m_jit);
3124 }
3125
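     // OSR exits if the base cell's type-info flags have none of the bits requested by
     // node->typeInfoOperand() set.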
3126 void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
3127 {
3128     SpeculateCellOperand base(this, node->child1());
3129
3130     GPRReg baseGPR = base.gpr();
3131
3132     speculationCheck(BadTypeInfoFlags, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
3133
3134     noResult(node);
3135 }
3136
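     // Implements parseInt by calling one of four runtime operations, chosen by whether an
     // explicit radix argument is present and whether the input is speculated to be a string or
     // left untyped.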
3137 void SpeculativeJIT::compileParseInt(Node* node)
3138 {
3139     RELEASE_ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == StringUse);
3140
3141     GPRFlushedCallResult resultPayload(this);
3142     GPRReg resultPayloadGPR = resultPayload.gpr();
3143 #if USE(JSVALUE64)
3144     JSValueRegs resultRegs(resultPayloadGPR);
3145 #else
3146     GPRFlushedCallResult2 resultTag(this);
3147     GPRReg resultTagGPR = resultTag.gpr();
3148     JSValueRegs resultRegs(resultTagGPR, resultPayloadGPR);
3149 #endif
3150
3151     if (node->child2()) {
3152         SpeculateInt32Operand radix(this, node->child2());
3153         GPRReg radixGPR = radix.gpr();
3154         if (node->child1().useKind() == UntypedUse) {
3155             JSValueOperand value(this, node->child1());
3156
3157             flushRegisters();
3158 #if USE(JSVALUE64)
3159             callOperation(operationParseIntGeneric, resultRegs.gpr(), value.gpr(), radixGPR);
3160 #else
3161             callOperation(operationParseIntGeneric, resultRegs, value.jsValueRegs(), radixGPR);
3162 #endif
3163             m_jit.exceptionCheck();
3164         } else {
3165             SpeculateCellOperand value(this, node->child1());
3166             GPRReg valueGPR = value.gpr();
3167             speculateString(node->child1(), valueGPR);
3168
3169             flushRegisters();
3170 #if USE(JSVALUE64)
3171             callOperation(operationParseIntString, resultRegs.gpr(), valueGPR, radixGPR);
3172 #else
3173             callOperation(operationParseIntString, resultRegs, valueGPR, radixGPR);
3174 #endif
3175             m_jit.exceptionCheck();
3176         }
3177     } else {
3178         if (node->child1().useKind() == UntypedUse) {
3179             JSValueOperand value(this, node->child1());
3180
3181             flushRegisters();
3182 #if USE(JSVALUE64)
3183             callOperation(operationParseIntNoRadixGeneric, resultRegs.gpr(), value.jsValueRegs());
3184 #else
3185             callOperation(operationParseIntNoRadixGeneric, resultRegs, value.jsValueRegs());
3186 #endif
3187             m_jit.exceptionCheck();
3188         } else {
3189             SpeculateCellOperand value(this, node->child1());
3190             GPRReg valueGPR = value.gpr();
3191             speculateString(node->child1(), valueGPR);
3192
3193             flushRegisters();
3194             callOperation(operationParseIntStringNoRadix, resultRegs, valueGPR);
3195             m_jit.exceptionCheck();
3196         }
3197     }
3198
3199     jsValueResult(resultRegs, node);
3200 }
3201
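     // Compiles instanceof. An untyped left-hand side gets a cell check first (non-cells are
     // never instances); the cell case shares compileInstanceOfForObject with the
     // cell-speculated path below.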
3202 void SpeculativeJIT::compileInstanceOf(Node* node)
3203 {
3204     if (node->child1().useKind() == UntypedUse) {
3205         // It might not be a cell. Speculate less aggressively.
3206         // Or: it might only be used once (i.e. by us), so we get zero benefit
3207         // from speculating any more aggressively than we absolutely need to.
3208         
3209         JSValueOperand value(this, node->child1());
3210         SpeculateCellOperand prototype(this, node->child2());
3211         GPRTemporary scratch(this);
3212         GPRTemporary scratch2(this);
3213         
3214         GPRReg prototypeReg = prototype.gpr();
3215         GPRReg scratchReg = scratch.gpr();
3216         GPRReg scratch2Reg = scratch2.gpr();
3217         
3218         MacroAssembler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
3219         GPRReg valueReg = value.jsValueRegs().payloadGPR();
3220         moveFalseTo(scratchReg);
3221
3222         MacroAssembler::Jump done = m_jit.jump();
3223         
3224         isCell.link(&m_jit);
3225         
3226         compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3227         
3228         done.link(&m_jit);
3229
3230         blessedBooleanResult(scratchReg, node);
3231         return;
3232     }
3233     
3234     SpeculateCellOperand value(this, node->child1());
3235     SpeculateCellOperand prototype(this, node->child2());
3236     
3237     GPRTemporary scratch(this);
3238     GPRTemporary scratch2(this);
3239     
3240     GPRReg valueReg = value.gpr();
3241     GPRReg prototypeReg = prototype.gpr();
3242     GPRReg scratchReg = scratch.gpr();
3243     GPRReg scratch2Reg = scratch2.gpr();
3244     
3245     compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg, scratch2Reg);
3246
3247     blessedBooleanResult(scratchReg, node);
3248 }
3249
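     // Compiles a bitwise op whose operands are untyped. If either operand is known not to be a
     // number, the whole op is a call to the slow-path function. Otherwise a snippet generator
     // emits an inline fast path, with the slow-path call reserved for the cases the snippet
     // cannot handle.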
3250 template<typename SnippetGenerator, J_JITOperation_EJJ snippetSlowPathFunction>
3251 void SpeculativeJIT::emitUntypedBitOp(Node* node)
3252 {
3253     Edge& leftChild = node->child1();
3254     Edge& rightChild = node->child2();
3255
3256     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3257         JSValueOperand left(this, leftChild);
3258         JSValueOperand right(this, rightChild);
3259         JSValueRegs leftRegs = left.jsValueRegs();
3260         JSValueRegs rightRegs = right.jsValueRegs();
3261 #if USE(JSVALUE64)
3262         GPRTemporary result(this);
3263         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3264 #else
3265         GPRTemporary resultTag(this);
3266         GPRTemporary resultPayload(this);
3267         JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3268 #endif
3269         flushRegisters();
3270         callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3271         m_jit.exceptionCheck();
3272
3273         jsValueResult(resultRegs, node);
3274         return;
3275     }
3276
3277     std::optional<JSValueOperand> left;
3278     std::optional<JSValueOperand> right;
3279
3280     JSValueRegs leftRegs;
3281     JSValueRegs rightRegs;
3282
3283 #if USE(JSVALUE64)
3284     GPRTemporary result(this);
3285     JSValueRegs resultRegs = JSValueRegs(result.gpr());
3286     GPRTemporary scratch(this);
3287     GPRReg scratchGPR = scratch.gpr();
3288 #else
3289     GPRTemporary resultTag(this);
3290     GPRTemporary resultPayload(this);
3291     JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
3292     GPRReg scratchGPR = resultTag.gpr();
3293 #endif
3294
3295     SnippetOperand leftOperand;
3296     SnippetOperand rightOperand;
3297
3298     // The snippet generator does not support both operands being constant. If the left
3299     // operand is already const, we'll ignore the right operand's constness.
3300     if (leftChild->isInt32Constant())
3301         leftOperand.setConstInt32(leftChild->asInt32());
3302     else if (rightChild->isInt32Constant())
3303         rightOperand.setConstInt32(rightChild->asInt32());
3304
3305     RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
3306
3307     if (!leftOperand.isConst()) {
3308         left.emplace(this, leftChild);
3309         leftRegs = left->jsValueRegs();
3310     }
3311     if (!rightOperand.isConst()) {
3312         right.emplace(this, rightChild);
3313         rightRegs = right->jsValueRegs();
3314     }
3315
3316     SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
3317     gen.generateFastPath(m_jit);
3318
3319     ASSERT(gen.didEmitFastPath());
3320     gen.endJumpList().append(m_jit.jump());
3321
3322     gen.slowPathJumpList().link(&m_jit);
3323     silentSpillAllRegisters(resultRegs);
3324
3325     if (leftOperand.isConst()) {
3326         leftRegs = resultRegs;
3327         m_jit.moveValue(leftChild->asJSValue(), leftRegs);
3328     } else if (rightOperand.isConst()) {
3329         rightRegs = resultRegs;
3330         m_jit.moveValue(rightChild->asJSValue(), rightRegs);
3331     }
3332
3333     callOperation(snippetSlowPathFunction, resultRegs, leftRegs, rightRegs);
3334
3335     silentFillAllRegisters(resultRegs);
3336     m_jit.exceptionCheck();
3337
3338     gen.endJumpList().link(&m_jit);
3339     jsValueResult(resultRegs, node);
3340 }
3341
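     // Compiles BitAnd/BitOr/BitXor. Untyped operands go through the generic snippet path above;
     // otherwise both operands are speculated int32, using an immediate for whichever side is a
     // constant int32.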
3342 void SpeculativeJIT::compileBitwiseOp(Node* node)
3343 {
3344     NodeType op = node->op();
3345     Edge& leftChild = node->child1();
3346     Edge& rightChild = node->child2();
3347
3348     if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
3349         switch (op) {
3350         case BitAnd:
3351             emitUntypedBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
3352             return;
3353         case BitOr:
3354             emitUntypedBitOp<JITBitOrGenerator, operationValueBitOr>(node);
3355             return;
3356         case BitXor:
3357             emitUntypedBitOp<JITBitXorGenerator, operationValueBitXor>(node);
3358             return;
3359         default:
3360             RELEASE_ASSERT_NOT_REACHED();
3361         }
3362     }
3363
3364     if (leftChild->isInt32Constant()) {
3365         SpeculateInt32Operand op2(this, rightChild);
3366         GPRTemporary result(this, Reuse, op2);
3367
3368         bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());
3369
3370         int32Result(result.gpr(), node);
3371
3372     } else if (rightChild->isInt32Constant()) {
3373         SpeculateInt32Operand op1(this, leftChild);
3374         GPRTemporary result(this, Reuse, op1);
3375
3376         bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());
3377
3378         int32Result(result.gpr(), node);
3379
3380     } else {
3381         SpeculateInt32Operand op1(this, leftChild);
3382         SpeculateInt32Operand op2(this, rightChild);
3383         GPRTemporary result(this, Reuse, op1, op2);
3384         
3385         GPRReg reg1 = op1.gpr();
3386         GPRReg reg2 = op2.gpr();
3387         bitOp(op, reg1, reg2, result.gpr());
3388         
3389         int32Result(result.gpr(), node);
3390     }
3391 }
3392
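     // Untyped right shift: BitRShift uses the signed-shift snippet and operationValueBitRShift,
     // while BitURShift uses the unsigned variants.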
3393 void SpeculativeJIT::emitUntypedRightShiftBitOp(Node* node)
3394 {
3395     J_JITOperation_EJJ snippetSlowPathFunction = node->op() == BitRShift
3396         ? operationValueBitRShift : operationValueBitURShift;
3397     JITRightShiftGenerator::ShiftType shiftType = node->op() == BitRShift
3398         ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
3399
3400     Edge& leftChild = node->child1();
3401     Edge& rightChild = node->child2();
3402
3403     if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
3404         JSValueOperand left(this, leftChild);
3405         JSValueOperand right(this, rightChild);
3406         JSValueRegs leftRegs = left.jsValueRegs();
3407         JSValueRegs rightRegs = right.jsValueRegs();
3408 #if USE(JSVALUE64)
3409         GPRTemporary result(this);
3410         JSValueRegs resultRegs = JSValueRegs(result.gpr());
3411 #else
3412         GPRTemporary resultTag(this);
3413         GPRTemporary resultPaylo