2 * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "ArrayPrototype.h"
32 #include "AtomicsObject.h"
33 #include "CallFrameShuffler.h"
34 #include "DFGAbstractInterpreterInlines.h"
35 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
36 #include "DFGOperations.h"
37 #include "DFGSlowPathGenerator.h"
38 #include "DirectArguments.h"
39 #include "GetterSetter.h"
40 #include "HasOwnPropertyCache.h"
41 #include "JSCInlines.h"
42 #include "JSLexicalEnvironment.h"
44 #include "JSPropertyNameEnumerator.h"
46 #include "ObjectPrototype.h"
47 #include "SetupVarargsFrame.h"
48 #include "SpillRegistersMode.h"
49 #include "StringPrototype.h"
50 #include "TypeProfilerLog.h"
53 namespace JSC { namespace DFG {
// Boxes an Int52 value held in sourceGPR into a full JSValue in targetGPR.
// 'format' says whether sourceGPR currently holds the left-shifted
// (DataFormatInt52) or the unshifted (DataFormatStrictInt52) representation.
// NOTE(review): tempGPR is set up on lines not visible in this excerpt —
// confirm its relationship to sourceGPR/targetGPR against the full file.
57 void SpeculativeJIT::boxInt52(GPRReg sourceGPR, GPRReg targetGPR, DataFormat format)
60 if (sourceGPR == targetGPR)
65 FPRReg fpr = fprAllocate();
// A shifted Int52 must be brought back to strict (unshifted) form in place
// before the boxing helper runs.
67 if (format == DataFormatInt52)
68 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), sourceGPR);
70 ASSERT(format == DataFormatStrictInt52);
// The FPR scratch is presumably for the path that re-boxes out-of-int32-range
// values as doubles — confirm in AssemblyHelpers::boxInt52.
72 m_jit.boxInt52(sourceGPR, targetGPR, tempGPR, fpr);
// We right-shifted sourceGPR in place above; if the caller still owns it
// (it is distinct from targetGPR), restore its shifted representation.
74 if (format == DataFormatInt52 && sourceGPR != targetGPR)
75 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), sourceGPR);
77 if (tempGPR != targetGPR)
// Fills 'edge' into a GPR as a fully boxed JSValue (a DataFormatJS* format)
// and returns that GPR. Handles three situations: the value is not in a
// register yet (constant or spilled to the stack), it is live as a raw
// int32, or it is already live in a boxed format.
83 GPRReg SpeculativeJIT::fillJSValue(Edge edge)
85 VirtualRegister virtualRegister = edge->virtualRegister();
86 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
88 switch (info.registerFormat()) {
// Not in a register: either materialize the constant or reload the spill
// slot, boxing as needed.
89 case DataFormatNone: {
90 GPRReg gpr = allocate();
92 if (edge->hasConstant()) {
93 JSValue jsValue = edge->asJSValue();
94 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
95 info.fillJSValue(*m_stream, gpr, DataFormatJS);
96 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
98 DataFormat spillFormat = info.spillFormat();
99 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
100 switch (spillFormat) {
// A spilled raw int32: load the 32-bit payload and OR in the number tag to
// produce a boxed JSInt32.
101 case DataFormatInt32: {
102 m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
103 m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
104 spillFormat = DataFormatJSInt32;
// Any other spill format must already be a boxed JS value; reload all 64 bits.
109 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
110 DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat & DataFormatJS);
113 info.fillJSValue(*m_stream, gpr, spillFormat);
// Live as a raw int32: box it by ORing in the number tag.
118 case DataFormatInt32: {
119 GPRReg gpr = info.gpr();
120 // If the register has already been locked we need to take a copy.
121 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
122 if (m_gprs.isLocked(gpr)) {
123 GPRReg result = allocate();
124 m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr, result);
128 m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
129 info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
// Already a boxed JSValue on JSVALUE64 — nothing to do, just return it.
134 // No retag required on JSVALUE64!
136 case DataFormatJSInt32:
137 case DataFormatJSDouble:
138 case DataFormatJSCell:
139 case DataFormatJSBoolean: {
140 GPRReg gpr = info.gpr();
// Unboxed non-int32 formats are never asked to fill as JSValue here.
145 case DataFormatBoolean:
146 case DataFormatStorage:
147 case DataFormatDouble:
148 case DataFormatInt52:
149 // this type currently never occurs
150 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
153 DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
154 return InvalidGPRReg;
// JSValueRegs convenience overload: on the 64-bit back-end a JSValueRegs is a
// single GPR, so this simply forwards to the GPR-based implementation below.
158 void SpeculativeJIT::cachedGetById(CodeOrigin origin, JSValueRegs base, JSValueRegs result, unsigned identifierNumber, JITCompiler::Jump slowPathTarget , SpillRegistersMode mode, AccessType type)
160 cachedGetById(origin, base.gpr(), result.gpr(), identifierNumber, slowPathTarget, mode, type);
// Emits an inline-cached get_by_id: a JITGetByIdGenerator fast path plus a
// slow-path call to the optimizing operation ('type' selects plain Get vs
// TryGet). 'slowPathTarget', if set, lets the caller route an extra check
// (e.g. a not-cell case) into the same slow path.
163 void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode, AccessType type)
165 CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
166 RegisterSet usedRegisters = this->usedRegisters();
167 if (spillMode == DontSpill) {
168 // We've already flushed registers to the stack, we don't need to spill these.
169 usedRegisters.set(baseGPR, false)
170 usedRegisters.set(resultGPR, false);
172 JITGetByIdGenerator gen(
173 m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber),
174 JSValueRegs(baseGPR), JSValueRegs(resultGPR), type);
175 gen.generateFastPath(m_jit);
177 JITCompiler::JumpList slowCases;
178 if (slowPathTarget.isSet())
179 slowCases.append(slowPathTarget);
180 slowCases.append(gen.slowPathJump());
182 auto slowPath = slowPathCall(
183 slowCases, this, type == AccessType::Get ? operationGetByIdOptimize : operationTryGetByIdOptimize,
184 spillMode, ExceptionCheckRequirement::CheckNeeded,
185 resultGPR, gen.stubInfo(), baseGPR, identifierUID(identifierNumber));
// Register the generator so the stub can be patched after link time.
187 m_jit.addGetById(gen, slowPath.get());
188 addSlowPathGenerator(WTFMove(slowPath));
// Emits an inline-cached get_by_id_with_this (a GetById where the receiver
// ('this') differs from the base). Always operates in DontSpill mode: the
// caller is expected to have flushed already, so base/this/result are
// excluded from the spill set.
191 void SpeculativeJIT::cachedGetByIdWithThis(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg thisGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::JumpList slowPathTarget)
193 CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
194 RegisterSet usedRegisters = this->usedRegisters();
195 // We've already flushed registers to the stack, we don't need to spill these.
196 usedRegisters.set(baseGPR, false);
197 usedRegisters.set(thisGPR, false);
198 usedRegisters.set(resultGPR, false);
200 JITGetByIdWithThisGenerator gen(
201 m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber),
202 JSValueRegs(resultGPR), JSValueRegs(baseGPR), JSValueRegs(thisGPR), AccessType::GetWithThis);
203 gen.generateFastPath(m_jit);
205 JITCompiler::JumpList slowCases;
206 if (!slowPathTarget.empty())
207 slowCases.append(slowPathTarget);
208 slowCases.append(gen.slowPathJump());
210 auto slowPath = slowPathCall(
211 slowCases, this, operationGetByIdWithThisOptimize,
212 DontSpill, ExceptionCheckRequirement::CheckNeeded,
213 resultGPR, gen.stubInfo(), baseGPR, thisGPR, identifierUID(identifierNumber));
// Register the generator so the stub can be patched after link time.
215 m_jit.addGetByIdWithThis(gen, slowPath.get());
216 addSlowPathGenerator(WTFMove(slowPath));
// Emits an inline-cached put_by_id. The generator is parameterized by the
// ECMA mode of the call site and the put kind, and gen.slowPathFunction()
// hands back the matching slow-path operation (strict/sloppy, direct/normal).
219 void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
221 CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
222 RegisterSet usedRegisters = this->usedRegisters();
223 if (spillMode == DontSpill) {
224 // We've already flushed registers to the stack, we don't need to spill these.
225 usedRegisters.set(baseGPR, false);
226 usedRegisters.set(valueGPR, false);
229 JITPutByIdGenerator gen(
230 m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, JSValueRegs(baseGPR),
231 JSValueRegs(valueGPR), scratchGPR, m_jit.ecmaModeFor(codeOrigin), putKind);
233 gen.generateFastPath(m_jit);
235 JITCompiler::JumpList slowCases;
236 if (slowPathTarget.isSet())
237 slowCases.append(slowPathTarget);
238 slowCases.append(gen.slowPathJump());
240 auto slowPath = slowPathCall(
241 slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), valueGPR, baseGPR,
242 identifierUID(identifierNumber));
// Register the generator so the stub can be patched after link time.
244 m_jit.addPutById(gen, slowPath.get());
245 addSlowPathGenerator(WTFMove(slowPath));
// Computes (operand == null || operand == undefined) and produces the result
// as a boxed JS boolean. Cells normally compare false, except objects with
// the MasqueradesAsUndefined type-info flag, which compare true only when
// observed from their own global object.
248 void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand)
250 ASSERT_WITH_MESSAGE(!masqueradesAsUndefinedWatchpointIsStillValid() || !isKnownCell(operand.node()), "The Compare should have been eliminated, it is known to be always false.");
252 JSValueOperand arg(this, operand, ManualOperandSpeculation);
253 GPRReg argGPR = arg.gpr();
255 GPRTemporary result(this);
256 GPRReg resultGPR = result.gpr();
// Default result: false (0). Cell cases that jump to 'done' keep this value.
258 m_jit.move(TrustedImm32(0), resultGPR);
260 JITCompiler::JumpList done;
// Watchpoint valid: no MasqueradesAsUndefined objects exist, so any cell is
// simply "not null/undefined" — skip straight to done with result == 0.
261 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
262 if (!isKnownNotCell(operand.node()))
263 done.append(m_jit.branchIfCell(JSValueRegs(argGPR)));
// Watchpoint fired: cells need the full check of the type-info flag plus a
// structure global-object comparison.
265 GPRTemporary localGlobalObject(this);
266 GPRTemporary remoteGlobalObject(this);
267 GPRTemporary scratch(this);
269 JITCompiler::Jump notCell;
270 if (!isKnownCell(operand.node()))
271 notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));
273 JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
275 JITCompiler::Address(argGPR, JSCell::typeInfoFlagsOffset()),
276 JITCompiler::TrustedImm32(MasqueradesAsUndefined));
277 done.append(isNotMasqueradesAsUndefined);
// A masquerader equals undefined only from the vantage of its own global
// object: compare the structure's global object against ours.
279 GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
280 GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
281 m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
282 m_jit.emitLoadStructure(*m_jit.vm(), argGPR, resultGPR, scratch.gpr());
283 m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
284 m_jit.comparePtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultGPR);
285 done.append(m_jit.jump());
286 if (!isKnownCell(operand.node()))
287 notCell.link(&m_jit);
// Non-cell path: masking off TagBitUndefined maps both null and undefined to
// ValueNull, so a single equality test covers both.
290 if (!isKnownNotOther(operand.node())) {
291 m_jit.move(argGPR, resultGPR);
292 m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
293 m_jit.compare64(JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR);
// Box the 0/1 result as a JS boolean (false | bit).
298 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
299 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
// Fused compare-to-null/undefined + branch: same logic as the non-peephole
// variant above, but instead of materializing a boolean we branch directly
// to the taken/notTaken blocks of 'branchNode'.
302 void SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined(Edge operand, Node* branchNode)
304 BasicBlock* taken = branchNode->branchData()->taken.block;
305 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
307 JSValueOperand arg(this, operand, ManualOperandSpeculation);
308 GPRReg argGPR = arg.gpr();
310 GPRTemporary result(this, Reuse, arg);
311 GPRReg resultGPR = result.gpr();
313 // First, handle the case where "operand" is a cell.
// Watchpoint valid: a cell can never be null/undefined, so go to notTaken.
314 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
315 if (!isKnownNotCell(operand.node())) {
316 JITCompiler::Jump isCell = m_jit.branchIfCell(JSValueRegs(argGPR));
317 addBranch(isCell, notTaken);
// Watchpoint fired: cells require the MasqueradesAsUndefined flag check and
// the structure global-object comparison (see the non-peephole variant).
320 GPRTemporary localGlobalObject(this);
321 GPRTemporary remoteGlobalObject(this);
322 GPRTemporary scratch(this);
324 JITCompiler::Jump notCell;
325 if (!isKnownCell(operand.node()))
326 notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));
// Non-masquerading cell: definitely not equal to null/undefined.
328 branchTest8(JITCompiler::Zero,
329 JITCompiler::Address(argGPR, JSCell::typeInfoFlagsOffset()),
330 JITCompiler::TrustedImm32(MasqueradesAsUndefined), notTaken);
332 GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
333 GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
334 m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
335 m_jit.emitLoadStructure(*m_jit.vm(), argGPR, resultGPR, scratch.gpr());
336 m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
337 branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, taken);
339 if (!isKnownCell(operand.node())) {
340 jump(notTaken, ForceJump);
341 notCell.link(&m_jit);
// If the value is known not to be null/undefined there is nothing left to
// test on the non-cell path.
345 if (isKnownNotOther(operand.node()))
// Invert the condition when 'taken' is the fall-through block so we can fall
// through instead of emitting a jump.
348 JITCompiler::RelationalCondition condition = JITCompiler::Equal;
349 if (taken == nextBlock()) {
350 condition = JITCompiler::NotEqual;
351 std::swap(taken, notTaken);
// Mask off TagBitUndefined so null and undefined both compare equal to
// ValueNull in one test.
353 m_jit.move(argGPR, resultGPR);
354 m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
355 branch64(condition, resultGPR, JITCompiler::TrustedImm64(ValueNull), taken);
// Fused untyped relational compare + branch. Fast path: if both operands are
// boxed int32s, compare the low 32 bits directly. Otherwise fall back to
// 'helperFunction' and branch on its boolean result.
360 void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
362 BasicBlock* taken = branchNode->branchData()->taken.block;
363 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
365 JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;
367 // The branch instruction will branch to the taken block.
368 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
369 if (taken == nextBlock()) {
370 cond = JITCompiler::invert(cond);
371 callResultCondition = JITCompiler::Zero;
372 BasicBlock* tmp = taken;
377 JSValueOperand arg1(this, node->child1());
378 JSValueOperand arg2(this, node->child2());
379 GPRReg arg1GPR = arg1.gpr();
380 GPRReg arg2GPR = arg2.gpr();
382 JITCompiler::JumpList slowPath;
// If either side is known not to be an int32, skip the inline fast path and
// go straight to the helper call.
384 if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
385 GPRFlushedCallResult result(this);
386 GPRReg resultGPR = result.gpr();
392 callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
393 m_jit.exceptionCheck();
395 branchTest32(callResultCondition, resultGPR, taken);
397 GPRTemporary result(this, Reuse, arg2);
398 GPRReg resultGPR = result.gpr();
// A boxed int32 is >= tagTypeNumberRegister; anything Below it is not an
// int32 and must take the slow path.
403 if (!isKnownInteger(node->child1().node()))
404 slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
405 if (!isKnownInteger(node->child2().node()))
406 slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
// Both are int32s: compare payloads directly.
408 branch32(cond, arg1GPR, arg2GPR, taken);
410 if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
411 jump(notTaken, ForceJump);
413 slowPath.link(&m_jit);
415 silentSpillAllRegisters(resultGPR);
416 callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
417 silentFillAllRegisters();
418 m_jit.exceptionCheck();
420 branchTest32(callResultCondition, resultGPR, taken);
// We consumed the branch node as part of this peephole; advance the
// per-block cursor past it so it is not compiled again.
426 m_indexInBlock = m_block->size() - 1;
427 m_currentNode = branchNode;
// Slow path for untyped comparisons that produce a value (not a branch):
// calls the comparison helper, then boxes its raw 0/1 int result as a JS
// boolean (and32(1) then or32(ValueFalse)) so it matches the fast path's
// DataFormatJSBoolean result.
// NOTE(review): the m_arg1/m_arg2 member declarations fall outside this
// excerpt.
430 template<typename JumpType>
431 class CompareAndBoxBooleanSlowPathGenerator
432 : public CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg> {
434 CompareAndBoxBooleanSlowPathGenerator(
435 JumpType from, SpeculativeJIT* jit,
436 S_JITOperation_EJJ function, GPRReg result, GPRReg arg1, GPRReg arg2)
437 : CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg>(
438 from, jit, function, NeedToSpill, ExceptionCheckRequirement::CheckNeeded, result)
445 void generateInternal(SpeculativeJIT* jit) override
448 this->recordCall(jit->callOperation(this->m_function, this->m_result, m_arg1, m_arg2));
449 jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);
450 jit->m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), this->m_result);
// Untyped relational compare producing a boxed JS boolean. Fast path handles
// two boxed int32s inline; anything else goes through 'helperFunction' (via
// CompareAndBoxBooleanSlowPathGenerator when the fast path is emitted).
459 void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
461 ASSERT(node->isBinaryUseKind(UntypedUse));
462 JSValueOperand arg1(this, node->child1());
463 JSValueOperand arg2(this, node->child2());
464 GPRReg arg1GPR = arg1.gpr();
465 GPRReg arg2GPR = arg2.gpr();
467 JITCompiler::JumpList slowPath;
// Either side known non-int32: no fast path, call the helper directly.
469 if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
470 GPRFlushedCallResult result(this);
471 GPRReg resultGPR = result.gpr();
477 callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
478 m_jit.exceptionCheck();
// Box the helper's 0/1 result as a JS boolean.
480 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
481 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
483 GPRTemporary result(this, Reuse, arg2);
484 GPRReg resultGPR = result.gpr();
// A boxed int32 is >= tagTypeNumberRegister; Below means "not an int32".
489 if (!isKnownInteger(node->child1().node()))
490 slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
491 if (!isKnownInteger(node->child2().node()))
492 slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
// Both int32: compare payloads and box the boolean inline.
494 m_jit.compare32(cond, arg1GPR, arg2GPR, resultGPR);
495 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
497 if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
498 addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>>(
499 slowPath, this, helperFunction, resultGPR, arg1GPR, arg2GPR));
502 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
// Fused strict-equality (===) + branch; 'invert' flips the sense (!==).
// Fast paths: identical cell pointers are equal; two non-cell, non-double
// values are equal iff their 64-bit encodings match. Doubles and distinct
// cells go through the runtime helpers.
506 void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
508 BasicBlock* taken = branchNode->branchData()->taken.block;
509 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
511 // The branch instruction will branch to the taken block.
512 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
513 if (taken == nextBlock()) {
515 BasicBlock* tmp = taken;
520 JSValueOperand arg1(this, node->child1());
521 JSValueOperand arg2(this, node->child2());
522 GPRReg arg1GPR = arg1.gpr();
523 GPRReg arg2GPR = arg2.gpr();
525 GPRTemporary result(this);
526 GPRReg resultGPR = result.gpr();
// Both sides known to be cells: pointer equality decides; otherwise ask the
// cell-comparison helper (covers e.g. equal-valued distinct cells).
531 if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
532 // see if we get lucky: if the arguments are cells and they reference the same
533 // cell, then they must be strictly equal.
534 branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
536 silentSpillAllRegisters(resultGPR);
537 callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
538 silentFillAllRegisters();
539 m_jit.exceptionCheck();
541 branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
// Mixed/unknown case. OR the two encodings: if no tag-mask bits are set,
// both values are cells.
543 m_jit.or64(arg1GPR, arg2GPR, resultGPR);
545 JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
// Peel off doubles on each side; doubles need the full helper because
// distinct encodings can still compare equal (and NaN != NaN).
547 JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
548 JITCompiler::Jump leftDouble = m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister);
550 JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
551 JITCompiler::Jump rightDouble = m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister);
552 rightOK.link(&m_jit);
// Non-cell, non-double values: strict equality is raw bit equality.
554 branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, taken);
555 jump(notTaken, ForceJump);
557 twoCellsCase.link(&m_jit);
558 branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
560 leftDouble.link(&m_jit);
561 rightDouble.link(&m_jit);
// Slow path: doubles, or distinct cells — let the generic helper decide.
563 silentSpillAllRegisters(resultGPR);
564 callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR);
565 silentFillAllRegisters();
566 m_jit.exceptionCheck();
568 branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
// Strict equality (===) producing a boxed JS boolean; 'invert' flips the
// sense. Mirrors the peephole variant above, but materializes the result
// instead of branching.
574 void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
576 JSValueOperand arg1(this, node->child1());
577 JSValueOperand arg2(this, node->child2());
578 GPRReg arg1GPR = arg1.gpr();
579 GPRReg arg2GPR = arg2.gpr();
581 GPRTemporary result(this);
582 GPRReg resultGPR = result.gpr();
// Both sides known to be cells: pointer equality decides; otherwise call the
// cell-comparison helper and box its 0/1 result.
587 if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
588 // see if we get lucky: if the arguments are cells and they reference the same
589 // cell, then they must be strictly equal.
590 // FIXME: this should flush registers instead of silent spill/fill.
591 JITCompiler::Jump notEqualCase = m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR);
593 m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
595 JITCompiler::Jump done = m_jit.jump();
597 notEqualCase.link(&m_jit);
599 silentSpillAllRegisters(resultGPR);
600 callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
601 silentFillAllRegisters();
602 m_jit.exceptionCheck();
// Box the helper's raw int result as a JS boolean.
604 m_jit.and64(JITCompiler::TrustedImm32(1), resultGPR);
605 m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
// Mixed/unknown case. OR the encodings: no tag-mask bits set means both are
// cells.
609 m_jit.or64(arg1GPR, arg2GPR, resultGPR);
611 JITCompiler::JumpList slowPathCases;
613 JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
// Doubles on either side take the slow path (distinct encodings can compare
// equal; NaN != NaN).
615 JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
616 slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister));
618 JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
619 slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister));
620 rightOK.link(&m_jit);
// Non-cell, non-double values: bit equality, boxed inline.
622 m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR);
623 m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
625 JITCompiler::Jump done = m_jit.jump();
627 twoCellsCase.link(&m_jit);
// Distinct cells go to the generic helper; identical cells are equal.
628 slowPathCases.append(m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR));
630 m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
632 addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<MacroAssembler::JumpList>>(
633 slowPathCases, this, operationCompareStrictEq, resultGPR, arg1GPR,
639 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
// Strict equality where at least one side is speculated MiscUse. After the
// speculation checks, the remaining value domain compares equal iff the two
// 64-bit encodings are bit-identical, so a single compare64 suffices; the
// 0/1 result is boxed as a JS boolean with or32(ValueFalse).
642 void SpeculativeJIT::compileMiscStrictEq(Node* node)
644 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
645 JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
646 GPRTemporary result(this);
// ManualOperandSpeculation above means we perform the MiscUse checks
// ourselves, and only on the children that actually use MiscUse.
648 if (node->child1().useKind() == MiscUse)
649 speculateMisc(node->child1(), op1.jsValueRegs());
650 if (node->child2().useKind() == MiscUse)
651 speculateMisc(node->child2(), op2.jsValueRegs());
653 m_jit.compare64(JITCompiler::Equal, op1.gpr(), op2.gpr(), result.gpr());
654 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
655 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
658 void SpeculativeJIT::emitCall(Node* node)
660 CallLinkInfo::CallType callType;
661 bool isVarargs = false;
662 bool isForwardVarargs = false;
664 bool isEmulatedTail = false;
665 bool isDirect = false;
666 switch (node->op()) {
669 callType = CallLinkInfo::Call;
672 callType = CallLinkInfo::TailCall;
675 case TailCallInlinedCaller:
676 callType = CallLinkInfo::Call;
677 isEmulatedTail = true;
680 callType = CallLinkInfo::Construct;
683 callType = CallLinkInfo::CallVarargs;
686 case TailCallVarargs:
687 callType = CallLinkInfo::TailCallVarargs;
691 case TailCallVarargsInlinedCaller:
692 callType = CallLinkInfo::CallVarargs;
694 isEmulatedTail = true;
696 case ConstructVarargs:
697 callType = CallLinkInfo::ConstructVarargs;
700 case CallForwardVarargs:
701 callType = CallLinkInfo::CallVarargs;
702 isForwardVarargs = true;
704 case ConstructForwardVarargs:
705 callType = CallLinkInfo::ConstructVarargs;
706 isForwardVarargs = true;
708 case TailCallForwardVarargs:
709 callType = CallLinkInfo::TailCallVarargs;
711 isForwardVarargs = true;
713 case TailCallForwardVarargsInlinedCaller:
714 callType = CallLinkInfo::CallVarargs;
715 isEmulatedTail = true;
716 isForwardVarargs = true;
719 callType = CallLinkInfo::DirectCall;
722 case DirectConstruct:
723 callType = CallLinkInfo::DirectConstruct;
727 callType = CallLinkInfo::DirectTailCall;
731 case DirectTailCallInlinedCaller:
732 callType = CallLinkInfo::DirectCall;
733 isEmulatedTail = true;
737 DFG_CRASH(m_jit.graph(), node, "bad node type");
741 GPRReg calleeGPR = InvalidGPRReg;
742 CallFrameShuffleData shuffleData;
744 ExecutableBase* executable = nullptr;
745 FunctionExecutable* functionExecutable = nullptr;
747 executable = node->castOperand<ExecutableBase*>();
748 functionExecutable = jsDynamicCast<FunctionExecutable*>(*m_jit.vm(), executable);
751 unsigned numPassedArgs = 0;
752 unsigned numAllocatedArgs = 0;
754 // Gotta load the arguments somehow. Varargs is trickier.
755 if (isVarargs || isForwardVarargs) {
756 RELEASE_ASSERT(!isDirect);
757 CallVarargsData* data = node->callVarargsData();
759 unsigned numUsedStackSlots = m_jit.graph().m_nextMachineLocal;
761 if (isForwardVarargs) {
770 scratchGPR1 = JITCompiler::selectScratchGPR();
771 scratchGPR2 = JITCompiler::selectScratchGPR(scratchGPR1);
772 scratchGPR3 = JITCompiler::selectScratchGPR(scratchGPR1, scratchGPR2);
774 m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR2);
775 JITCompiler::JumpList slowCase;
776 InlineCallFrame* inlineCallFrame;
778 inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame;
780 inlineCallFrame = node->origin.semantic.inlineCallFrame;
781 // emitSetupVarargsFrameFastCase modifies the stack pointer if it succeeds.
782 emitSetupVarargsFrameFastCase(*m_jit.vm(), m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
783 JITCompiler::Jump done = m_jit.jump();
784 slowCase.link(&m_jit);
785 callOperation(operationThrowStackOverflowForVarargs);
786 m_jit.exceptionCheck();
787 m_jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
795 auto loadArgumentsGPR = [&] (GPRReg reservedGPR) {
796 if (reservedGPR != InvalidGPRReg)
798 JSValueOperand arguments(this, node->child3());
799 argumentsGPR = arguments.gpr();
800 if (reservedGPR != InvalidGPRReg)
804 scratchGPR1 = JITCompiler::selectScratchGPR(argumentsGPR, reservedGPR);
805 scratchGPR2 = JITCompiler::selectScratchGPR(argumentsGPR, scratchGPR1, reservedGPR);
806 scratchGPR3 = JITCompiler::selectScratchGPR(argumentsGPR, scratchGPR1, scratchGPR2, reservedGPR);
809 loadArgumentsGPR(InvalidGPRReg);
811 DFG_ASSERT(m_jit.graph(), node, isFlushed());
813 // Right now, arguments is in argumentsGPR and the register file is flushed.
814 callOperation(operationSizeFrameForVarargs, GPRInfo::returnValueGPR, argumentsGPR, numUsedStackSlots, data->firstVarArgOffset);
815 m_jit.exceptionCheck();
817 // Now we have the argument count of the callee frame, but we've lost the arguments operand.
818 // Reconstruct the arguments operand while preserving the callee frame.
819 loadArgumentsGPR(GPRInfo::returnValueGPR);
820 m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR1);
821 emitSetVarargsFrame(m_jit, GPRInfo::returnValueGPR, false, scratchGPR1, scratchGPR1);
822 m_jit.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), scratchGPR1, JITCompiler::stackPointerRegister);
824 callOperation(operationSetupVarargsFrame, GPRInfo::returnValueGPR, scratchGPR1, argumentsGPR, data->firstVarArgOffset, GPRInfo::returnValueGPR);
825 m_jit.exceptionCheck();
826 m_jit.addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::returnValueGPR, JITCompiler::stackPointerRegister);
829 DFG_ASSERT(m_jit.graph(), node, isFlushed());
831 // We don't need the arguments array anymore.
835 // Now set up the "this" argument.
836 JSValueOperand thisArgument(this, node->child2());
837 GPRReg thisArgumentGPR = thisArgument.gpr();
840 m_jit.store64(thisArgumentGPR, JITCompiler::calleeArgumentSlot(0));
842 // The call instruction's first child is the function; the subsequent children are the
844 numPassedArgs = node->numChildren() - 1;
845 numAllocatedArgs = numPassedArgs;
847 if (functionExecutable) {
848 // Allocate more args if this would let us avoid arity checks. This is throttled by
849 // CallLinkInfo's limit. It's probably good to throttle it - if the callee wants a
850 // ginormous amount of argument space then it's better for them to do it so that when we
851 // make calls to other things, we don't waste space.
852 unsigned desiredNumAllocatedArgs = static_cast<unsigned>(functionExecutable->parameterCount()) + 1;
853 if (desiredNumAllocatedArgs <= Options::maximumDirectCallStackSize()) {
854 numAllocatedArgs = std::max(numAllocatedArgs, desiredNumAllocatedArgs);
856 // Whoever converts to DirectCall should do this adjustment. It's too late for us to
857 // do this adjustment now since we will have already emitted code that relied on the
858 // value of m_parameterSlots.
861 Graph::parameterSlotsForArgCount(numAllocatedArgs)
862 <= m_jit.graph().m_parameterSlots);
867 Edge calleeEdge = m_jit.graph().child(node, 0);
868 JSValueOperand callee(this, calleeEdge);
869 calleeGPR = callee.gpr();
873 shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
874 shuffleData.numLocals = m_jit.graph().frameRegisterCount();
875 shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatJS);
876 shuffleData.args.resize(numAllocatedArgs);
877 shuffleData.numPassedArgs = numPassedArgs;
879 for (unsigned i = 0; i < numPassedArgs; ++i) {
880 Edge argEdge = m_jit.graph().varArgChild(node, i + 1);
881 GenerationInfo& info = generationInfo(argEdge.node());
884 shuffleData.args[i] = info.recovery(argEdge->virtualRegister());
887 for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
888 shuffleData.args[i] = ValueRecovery::constant(jsUndefined());
890 shuffleData.setupCalleeSaveRegisters(m_jit.codeBlock());
892 m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), JITCompiler::calleeFramePayloadSlot(CallFrameSlot::argumentCount));
894 for (unsigned i = 0; i < numPassedArgs; i++) {
895 Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
896 JSValueOperand arg(this, argEdge);
897 GPRReg argGPR = arg.gpr();
900 m_jit.store64(argGPR, JITCompiler::calleeArgumentSlot(i));
903 for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
904 m_jit.storeTrustedValue(jsUndefined(), JITCompiler::calleeArgumentSlot(i));
908 if (!isTail || isVarargs || isForwardVarargs) {
909 Edge calleeEdge = m_jit.graph().child(node, 0);
910 JSValueOperand callee(this, calleeEdge);
911 calleeGPR = callee.gpr();
913 m_jit.store64(calleeGPR, JITCompiler::calleeFrameSlot(CallFrameSlot::callee));
918 CodeOrigin staticOrigin = node->origin.semantic;
919 ASSERT(!isTail || !staticOrigin.inlineCallFrame || !staticOrigin.inlineCallFrame->getCallerSkippingTailCalls());
920 ASSERT(!isEmulatedTail || (staticOrigin.inlineCallFrame && staticOrigin.inlineCallFrame->getCallerSkippingTailCalls()));
921 CodeOrigin dynamicOrigin =
922 isEmulatedTail ? *staticOrigin.inlineCallFrame->getCallerSkippingTailCalls() : staticOrigin;
924 CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(dynamicOrigin, m_stream->size());
926 auto setResultAndResetStack = [&] () {
927 GPRFlushedCallResult result(this);
928 GPRReg resultGPR = result.gpr();
929 m_jit.move(GPRInfo::returnValueGPR, resultGPR);
931 jsValueResult(resultGPR, m_currentNode, DataFormatJS, UseChildrenCalledExplicitly);
933 // After the calls are done, we need to reestablish our stack
934 // pointer. We rely on this for varargs calls, calls with arity
935 // mismatch (the callframe is slided) and tail calls.
936 m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister);
939 CallLinkInfo* callLinkInfo = m_jit.codeBlock()->addCallLinkInfo();
940 callLinkInfo->setUpCall(callType, m_currentNode->origin.semantic, calleeGPR);
942 if (node->op() == CallEval) {
943 // We want to call operationCallEval but we don't want to overwrite the parameter area in
944 // which we have created a prototypical eval call frame. This means that we have to
945 // subtract stack to make room for the call. Lucky for us, at this point we have the whole
946 // register file to ourselves.
948 m_jit.emitStoreCallSiteIndex(callSite);
949 m_jit.addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), JITCompiler::stackPointerRegister, GPRInfo::regT0);
950 m_jit.storePtr(GPRInfo::callFrameRegister, JITCompiler::Address(GPRInfo::regT0, CallFrame::callerFrameOffset()));
952 // Now we need to make room for:
953 // - The caller frame and PC of a call to operationCallEval.
954 // - Potentially two arguments on the stack.
955 unsigned requiredBytes = sizeof(CallerFrameAndPC) + sizeof(ExecState*) * 2;
956 requiredBytes = WTF::roundUpToMultipleOf(stackAlignmentBytes(), requiredBytes);
957 m_jit.subPtr(TrustedImm32(requiredBytes), JITCompiler::stackPointerRegister);
958 m_jit.setupArgumentsWithExecState(GPRInfo::regT0);
959 prepareForExternalCall();
960 m_jit.appendCall(operationCallEval);
961 m_jit.exceptionCheck();
962 JITCompiler::Jump done = m_jit.branchTest64(JITCompiler::NonZero, GPRInfo::returnValueGPR);
964 // This is the part where we meant to make a normal call. Oops.
965 m_jit.addPtr(TrustedImm32(requiredBytes), JITCompiler::stackPointerRegister);
966 m_jit.load64(JITCompiler::calleeFrameSlot(CallFrameSlot::callee), GPRInfo::regT0);
967 m_jit.emitDumbVirtualCall(*m_jit.vm(), callLinkInfo);
970 setResultAndResetStack();
975 callLinkInfo->setExecutableDuringCompilation(executable);
976 callLinkInfo->setMaxNumArguments(numAllocatedArgs);
979 RELEASE_ASSERT(node->op() == DirectTailCall);
981 JITCompiler::PatchableJump patchableJump = m_jit.patchableJump();
982 JITCompiler::Label mainPath = m_jit.label();
984 m_jit.emitStoreCallSiteIndex(callSite);
986 callLinkInfo->setFrameShuffleData(shuffleData);
987 CallFrameShuffler(m_jit, shuffleData).prepareForTailCall();
989 JITCompiler::Call call = m_jit.nearTailCall();
991 JITCompiler::Label slowPath = m_jit.label();
992 patchableJump.m_jump.linkTo(slowPath, &m_jit);
994 silentSpillAllRegisters(InvalidGPRReg);
995 callOperation(operationLinkDirectCall, callLinkInfo, calleeGPR);
996 silentFillAllRegisters();
997 m_jit.exceptionCheck();
998 m_jit.jump().linkTo(mainPath, &m_jit);
1002 m_jit.addJSDirectTailCall(patchableJump, call, slowPath, callLinkInfo);
1006 JITCompiler::Label mainPath = m_jit.label();
1008 m_jit.emitStoreCallSiteIndex(callSite);
1010 JITCompiler::Call call = m_jit.nearCall();
1011 JITCompiler::Jump done = m_jit.jump();
1013 JITCompiler::Label slowPath = m_jit.label();
1015 m_jit.pop(JITCompiler::selectScratchGPR(calleeGPR));
1017 callOperation(operationLinkDirectCall, callLinkInfo, calleeGPR);
1018 m_jit.exceptionCheck();
1019 m_jit.jump().linkTo(mainPath, &m_jit);
1023 setResultAndResetStack();
1025 m_jit.addJSDirectCall(call, slowPath, callLinkInfo);
1029 m_jit.emitStoreCallSiteIndex(callSite);
1031 JITCompiler::DataLabelPtr targetToCheck;
1032 JITCompiler::Jump slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, TrustedImmPtr(0));
1035 if (node->op() == TailCall) {
1036 callLinkInfo->setFrameShuffleData(shuffleData);
1037 CallFrameShuffler(m_jit, shuffleData).prepareForTailCall();
1039 m_jit.emitRestoreCalleeSaves();
1040 m_jit.prepareForTailCallSlow();
1044 JITCompiler::Call fastCall = isTail ? m_jit.nearTailCall() : m_jit.nearCall();
1046 JITCompiler::Jump done = m_jit.jump();
1048 slowPath.link(&m_jit);
1050 if (node->op() == TailCall) {
1051 CallFrameShuffler callFrameShuffler(m_jit, shuffleData);
1052 callFrameShuffler.setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
1053 callFrameShuffler.prepareForSlowPath();
1055 m_jit.move(calleeGPR, GPRInfo::regT0); // Callee needs to be in regT0
1058 m_jit.emitRestoreCalleeSaves(); // This needs to happen after we moved calleeGPR to regT0
1061 m_jit.move(TrustedImmPtr(callLinkInfo), GPRInfo::regT2); // Link info needs to be in regT2
1062 JITCompiler::Call slowCall = m_jit.nearCall();
1067 m_jit.abortWithReason(JITDidReturnFromTailCall);
1069 setResultAndResetStack();
1071 m_jit.addJSCall(fastCall, slowCall, targetToCheck, callLinkInfo);
1074 // Clang should allow unreachable [[clang::fallthrough]] in template functions if any template expansion uses it
1075 // http://llvm.org/bugs/show_bug.cgi?id=18619
1076 #if COMPILER(CLANG) && defined(__has_warning)
1077 #pragma clang diagnostic push
1078 #if __has_warning("-Wimplicit-fallthrough")
1079 #pragma clang diagnostic ignored "-Wimplicit-fallthrough"
// Fills `edge` into a GPR while speculating it is an Int32. When `strict` is
// true the caller requires an unboxed DataFormatInt32 result; otherwise a
// boxed DataFormatJSInt32 may be returned. Emits a BadType speculation check
// whenever the abstract value's proven type is wider than SpecInt32Only.
// NOTE(review): the embedded listing line numbers jump (e.g. 1092 -> 1096),
// so several statements (returns, closing braces, else-paths) are elided
// from this view; do not treat the text below as a complete function body.
1082 template<bool strict>
1083 GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnFormat)
1085 AbstractValue& value = m_state.forNode(edge);
1086 SpeculatedType type = value.m_type;
1087 ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32Only));
1089 m_interpreter.filter(value, SpecInt32Only);
// If filtering to SpecInt32Only leaves nothing, this code path is provably
// unreachable: terminate speculative execution and hand back a dummy format.
1090 if (value.isClear()) {
1091 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1092 returnFormat = DataFormatInt32;
1096 VirtualRegister virtualRegister = edge->virtualRegister();
1097 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1099 switch (info.registerFormat()) {
// Not currently in a register: materialize from a constant or from the spill
// slot on the stack.
1100 case DataFormatNone: {
1101 GPRReg gpr = allocate();
1103 if (edge->hasConstant()) {
1104 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
1105 ASSERT(edge->isInt32Constant());
1106 m_jit.move(MacroAssembler::Imm32(edge->asInt32()), gpr);
1107 info.fillInt32(*m_stream, gpr);
1108 returnFormat = DataFormatInt32;
1112 DataFormat spillFormat = info.spillFormat();
1114 DFG_ASSERT(m_jit.graph(), m_currentNode, (spillFormat & DataFormatJS) || spillFormat == DataFormatInt32);
1116 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
1118 if (spillFormat == DataFormatJSInt32 || spillFormat == DataFormatInt32) {
1119 // If we know this was spilled as an integer we can fill without checking.
// Only the low 32 bits are loaded here; the boxed path below uses load64.
1121 m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
1122 info.fillInt32(*m_stream, gpr);
1123 returnFormat = DataFormatInt32;
1126 if (spillFormat == DataFormatInt32) {
1127 m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
1128 info.fillInt32(*m_stream, gpr);
1129 returnFormat = DataFormatInt32;
1131 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
1132 info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
1133 returnFormat = DataFormatJSInt32;
1137 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
1139 // Fill as JSValue, and fall through.
1140 info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
// Boxed JSValue of unknown subtype: check it is a boxed int32 before refining
// the format.
1145 case DataFormatJS: {
1146 DFG_ASSERT(m_jit.graph(), m_currentNode, !(type & SpecInt52Only));
1147 // Check the value is an integer.
1148 GPRReg gpr = info.gpr();
// On 64-bit, boxed int32s compare >= tagTypeNumberRegister; anything Below
// it is not a boxed int32, so that is the speculation-failure branch.
1150 if (type & ~SpecInt32Only)
1151 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branch64(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
1152 info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
1153 // If !strict we're done, return.
1155 returnFormat = DataFormatJSInt32;
1158 // else fall through & handle as DataFormatJSInt32.
1163 case DataFormatJSInt32: {
1164 // In a strict fill we need to strip off the value tag.
1166 GPRReg gpr = info.gpr();
1168 // If the register has already been locked we need to take a copy.
1169 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
1170 if (m_gprs.isLocked(gpr))
1171 result = allocate();
1174 info.fillInt32(*m_stream, gpr);
// zeroExtend32ToPtr discards the box tag, leaving the raw int32.
1177 m_jit.zeroExtend32ToPtr(gpr, result);
1178 returnFormat = DataFormatInt32;
1182 GPRReg gpr = info.gpr();
1184 returnFormat = DataFormatJSInt32;
1188 case DataFormatInt32: {
1189 GPRReg gpr = info.gpr();
1191 returnFormat = DataFormatInt32;
// Every remaining format is incompatible with an Int32 speculation; reaching
// here indicates a compiler invariant violation.
1195 case DataFormatJSDouble:
1196 case DataFormatCell:
1197 case DataFormatBoolean:
1198 case DataFormatJSCell:
1199 case DataFormatJSBoolean:
1200 case DataFormatDouble:
1201 case DataFormatStorage:
1202 case DataFormatInt52:
1203 case DataFormatStrictInt52:
1204 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
1207 DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
1208 return InvalidGPRReg;
1211 #if COMPILER(CLANG) && defined(__has_warning)
1212 #pragma clang diagnostic pop
// Non-strict Int32 fill: may return either DataFormatInt32 or
// DataFormatJSInt32 via `returnFormat` (see fillSpeculateInt32Internal).
1215 GPRReg SpeculativeJIT::fillSpeculateInt32(Edge edge, DataFormat& returnFormat)
1217 return fillSpeculateInt32Internal<false>(edge, returnFormat);
// Strict Int32 fill: the result register always holds an unboxed int32.
// The DFG_ASSERT enforces that the strict template path really produced
// DataFormatInt32.
1220 GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge)
1222 DataFormat mustBeDataFormatInt32;
1223 GPRReg result = fillSpeculateInt32Internal<true>(edge, mustBeDataFormatInt32);
1224 DFG_ASSERT(m_jit.graph(), m_currentNode, mustBeDataFormatInt32 == DataFormatInt32);
// Fills `edge` as an Int52, in the caller-requested representation:
// DataFormatInt52 (value shifted left by JSValue::int52ShiftAmount) or
// DataFormatStrictInt52 (unshifted). Converts between the two by
// lshift64/rshift64 as needed.
// NOTE(review): embedded listing numbers jump (e.g. 1235 -> 1239), so some
// statements are elided from this view.
1228 GPRReg SpeculativeJIT::fillSpeculateInt52(Edge edge, DataFormat desiredFormat)
1230 ASSERT(desiredFormat == DataFormatInt52 || desiredFormat == DataFormatStrictInt52);
1231 AbstractValue& value = m_state.forNode(edge);
1233 m_interpreter.filter(value, SpecAnyInt);
// Proven unreachable once filtered to SpecAnyInt: bail out of speculation.
1234 if (value.isClear()) {
1235 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1239 VirtualRegister virtualRegister = edge->virtualRegister();
1240 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1242 switch (info.registerFormat()) {
// Not in a register: materialize a constant (pre-shifted if the caller wants
// DataFormatInt52) or reload from the spill slot and convert formats.
1243 case DataFormatNone: {
1244 GPRReg gpr = allocate();
1246 if (edge->hasConstant()) {
1247 JSValue jsValue = edge->asJSValue();
1248 ASSERT(jsValue.isAnyInt());
1249 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
1250 int64_t value = jsValue.asAnyInt();
1251 if (desiredFormat == DataFormatInt52)
1252 value = value << JSValue::int52ShiftAmount;
1253 m_jit.move(MacroAssembler::Imm64(value), gpr);
1254 info.fillGPR(*m_stream, gpr, desiredFormat);
1258 DataFormat spillFormat = info.spillFormat();
1260 DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52);
1262 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
1264 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
// Convert spilled representation to the requested one; only shift when the
// stored format differs from the desired format.
1265 if (desiredFormat == DataFormatStrictInt52) {
1266 if (spillFormat == DataFormatInt52)
1267 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
1268 info.fillStrictInt52(*m_stream, gpr);
1271 if (spillFormat == DataFormatStrictInt52)
1272 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
1273 info.fillInt52(*m_stream, gpr);
// Already live as strict (unshifted) Int52; shift left in place (or in a
// copy, if the register is locked) when the caller wants the shifted form.
1277 case DataFormatStrictInt52: {
1278 GPRReg gpr = info.gpr();
1279 bool wasLocked = m_gprs.isLocked(gpr);
1281 if (desiredFormat == DataFormatStrictInt52)
1284 GPRReg result = allocate();
1285 m_jit.move(gpr, result);
1289 info.fillInt52(*m_stream, gpr);
1290 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
// Mirror case: live as shifted Int52, shift right when strict is requested.
1294 case DataFormatInt52: {
1295 GPRReg gpr = info.gpr();
1296 bool wasLocked = m_gprs.isLocked(gpr);
1298 if (desiredFormat == DataFormatInt52)
1301 GPRReg result = allocate();
1302 m_jit.move(gpr, result);
1306 info.fillStrictInt52(*m_stream, gpr);
1307 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
1312 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
1313 return InvalidGPRReg;
// Fills a DoubleRep edge into an FPR. Handles: number constants (moved in via
// a GPR bit-pattern, with a cheap moveZeroToDouble path visible for one
// branch), values spilled as DataFormatDouble, and values already live in an
// FPR. Non-double spill formats are a compiler bug (DFG_CRASH).
// NOTE(review): listing numbers jump (e.g. 1343 -> 1346) and the visible text
// ends mid-function at "info.fpr()"; the tail of this function is elided.
1317 FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
1319 ASSERT(edge.useKind() == DoubleRepUse || edge.useKind() == DoubleRepRealUse || edge.useKind() == DoubleRepAnyIntUse);
1320 ASSERT(edge->hasDoubleResult());
1321 VirtualRegister virtualRegister = edge->virtualRegister();
1322 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1324 if (info.registerFormat() == DataFormatNone) {
1325 if (edge->hasConstant()) {
1326 if (edge->isNumberConstant()) {
1327 FPRReg fpr = fprAllocate();
// Doubles have no direct immediate form: reinterpret the constant's bits as
// an int64 and transfer GPR -> FPR (except the zero fast path).
1328 int64_t doubleAsInt = reinterpretDoubleToInt64(edge->asNumber());
1330 m_jit.moveZeroToDouble(fpr);
1332 GPRReg gpr = allocate();
1333 m_jit.move(MacroAssembler::Imm64(doubleAsInt), gpr);
1334 m_jit.move64ToDouble(gpr, fpr);
1338 m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
1339 info.fillDouble(*m_stream, fpr);
// Non-number constant under a DoubleRep use is unreachable speculation.
1342 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1343 return fprAllocate();
1346 DataFormat spillFormat = info.spillFormat();
1347 if (spillFormat != DataFormatDouble) {
1349 m_jit.graph(), m_currentNode, toCString(
1350 "Expected ", edge, " to have double format but instead it is spilled as ",
1351 dataFormatToString(spillFormat)).data());
1353 DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat == DataFormatDouble);
1354 FPRReg fpr = fprAllocate();
1355 m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
1356 m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
1357 info.fillDouble(*m_stream, fpr);
1361 DFG_ASSERT(m_jit.graph(), m_currentNode, info.registerFormat() == DataFormatDouble);
1362 FPRReg fpr = info.fpr();
// Fills `edge` into a GPR speculating it is a cell (SpecCellCheck). Emits a
// branchIfNotCell speculation check when the proven type is wider than cells.
// Already-cell formats get a debug-only re-verification; incompatible formats
// crash the compiler.
// NOTE(review): listing numbers jump (e.g. 1375 -> 1379), so some statements
// are elided from this view.
1367 GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
1369 AbstractValue& value = m_state.forNode(edge);
1370 SpeculatedType type = value.m_type;
1371 ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCellCheck));
1373 m_interpreter.filter(value, SpecCellCheck);
1374 if (value.isClear()) {
1375 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1379 VirtualRegister virtualRegister = edge->virtualRegister();
1380 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1382 switch (info.registerFormat()) {
// Not in a register: materialize the constant or reload the boxed value from
// the spill slot, then check cell-ness if the type isn't already proven.
1383 case DataFormatNone: {
1384 GPRReg gpr = allocate();
1386 if (edge->hasConstant()) {
1387 JSValue jsValue = edge->asJSValue();
1388 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
1389 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
1390 info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
1394 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
1395 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
1397 info.fillJSValue(*m_stream, gpr, DataFormatJS);
1398 if (type & ~SpecCellCheck)
1399 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchIfNotCell(JSValueRegs(gpr)));
1400 info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
// Already known to be a cell: in asserting builds, abort if the register
// somehow does not hold a cell (DFGIsNotCell) as a sanity check.
1404 case DataFormatCell:
1405 case DataFormatJSCell: {
1406 GPRReg gpr = info.gpr();
1408 if (!ASSERT_DISABLED) {
1409 MacroAssembler::Jump checkCell = m_jit.branchIfCell(JSValueRegs(gpr));
1410 m_jit.abortWithReason(DFGIsNotCell);
1411 checkCell.link(&m_jit);
// Generic boxed value: add the cell speculation check, then refine format.
1416 case DataFormatJS: {
1417 GPRReg gpr = info.gpr();
1419 if (type & ~SpecCellCheck)
1420 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchIfNotCell(JSValueRegs(gpr)));
1421 info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
1425 case DataFormatJSInt32:
1426 case DataFormatInt32:
1427 case DataFormatJSDouble:
1428 case DataFormatJSBoolean:
1429 case DataFormatBoolean:
1430 case DataFormatDouble:
1431 case DataFormatStorage:
1432 case DataFormatInt52:
1433 case DataFormatStrictInt52:
1434 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format")
1437 DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
1438 return InvalidGPRReg;
// Fills `edge` into a GPR speculating it is a boolean (SpecBoolean). The
// check uses an xor trick (see inline comment). On failure the recovery info
// (BooleanSpeculationCheck) lets OSR exit undo the first xor.
// NOTE(review): listing numbers jump (e.g. 1450 -> 1454), so some statements
// are elided from this view.
1442 GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
1444 AbstractValue& value = m_state.forNode(edge);
1445 SpeculatedType type = value.m_type;
1446 ASSERT(edge.useKind() != KnownBooleanUse || !(value.m_type & ~SpecBoolean));
1448 m_interpreter.filter(value, SpecBoolean);
1449 if (value.isClear()) {
1450 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1454 VirtualRegister virtualRegister = edge->virtualRegister();
1455 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1457 switch (info.registerFormat()) {
// Not in a register: materialize the constant or reload from the spill slot,
// then type-check if booleans aren't already proven.
1458 case DataFormatNone: {
1459 GPRReg gpr = allocate();
1461 if (edge->hasConstant()) {
1462 JSValue jsValue = edge->asJSValue();
1463 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
1464 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
1465 info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
1468 DFG_ASSERT(m_jit.graph(), m_currentNode, info.spillFormat() & DataFormatJS);
1469 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
1470 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
1472 info.fillJSValue(*m_stream, gpr, DataFormatJS);
// xor with ValueFalse maps boxed false/true to 0/1; any other bit left set
// (test against ~1) means "not a boolean". The second xor restores the
// original boxed value for the success path.
1473 if (type & ~SpecBoolean) {
1474 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
1475 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
1476 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
1478 info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
// Already known boolean: nothing to check.
1482 case DataFormatBoolean:
1483 case DataFormatJSBoolean: {
1484 GPRReg gpr = info.gpr();
// Generic boxed value in a register: same xor-based check as above.
1489 case DataFormatJS: {
1490 GPRReg gpr = info.gpr();
1492 if (type & ~SpecBoolean) {
1493 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
1494 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
1495 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
1497 info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
1501 case DataFormatJSInt32:
1502 case DataFormatInt32:
1503 case DataFormatJSDouble:
1504 case DataFormatJSCell:
1505 case DataFormatCell:
1506 case DataFormatDouble:
1507 case DataFormatStorage:
1508 case DataFormatInt52:
1509 case DataFormatStrictInt52:
1510 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
1513 DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
1514 return InvalidGPRReg;
// Compares two children both speculated to be objects. If the
// MasqueradesAsUndefined watchpoint is valid, only object checks are needed;
// otherwise each operand is additionally checked for the
// MasqueradesAsUndefined type-info flag. Result is a boxed JS boolean made by
// or'ing ValueFalse into the 0/1 pointer-compare result.
1518 void SpeculativeJIT::compileObjectEquality(Node* node)
1520 SpeculateCellOperand op1(this, node->child1());
1521 SpeculateCellOperand op2(this, node->child2());
1522 GPRTemporary result(this, Reuse, op1);
1524 GPRReg op1GPR = op1.gpr();
1525 GPRReg op2GPR = op2.gpr();
1526 GPRReg resultGPR = result.gpr();
1528 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1530 JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
1532 JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
// Watchpoint invalid: also speculate that neither object masquerades as
// undefined, since such objects would break plain pointer equality for ==.
1535 JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
1536 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1538 MacroAssembler::NonZero,
1539 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1540 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1543 JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
1544 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1546 MacroAssembler::NonZero,
1547 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1548 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1551 m_jit.compare64(MacroAssembler::Equal, op1GPR, op2GPR, resultGPR);
1552 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
1553 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
// Strict equality (===) where one side is speculated to be an object and the
// other is an arbitrary JSValue. Only the object side needs a type check;
// strict equality then reduces to raw 64-bit comparison of encoded values.
1556 void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild, Edge otherChild)
1558 SpeculateCellOperand op1(this, objectChild);
1559 JSValueOperand op2(this, otherChild);
1560 GPRTemporary result(this);
1562 GPRReg op1GPR = op1.gpr();
1563 GPRReg op2GPR = op2.gpr();
1564 GPRReg resultGPR = result.gpr();
1566 DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1568 // At this point we know that we can perform a straight-forward equality comparison on pointer
1569 // values because we are doing strict equality.
1570 m_jit.compare64(MacroAssembler::Equal, op1GPR, op2GPR, resultGPR);
// Box the 0/1 result into a JS boolean.
1571 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
1572 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
// Fused compare+branch form of compileObjectStrictEquality: instead of
// producing a boolean, branches directly to the taken/notTaken blocks. When
// `taken` is the fall-through block the comparison is inverted so the common
// path falls through.
// NOTE(review): listing numbers jump after line 1592; the trailing
// jump/close of this function is elided from this view.
1575 void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode)
1577 BasicBlock* taken = branchNode->branchData()->taken.block;
1578 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1580 SpeculateCellOperand op1(this, objectChild);
1581 JSValueOperand op2(this, otherChild);
1583 GPRReg op1GPR = op1.gpr();
1584 GPRReg op2GPR = op2.gpr();
1586 DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1588 if (taken == nextBlock()) {
1589 branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR, notTaken);
1592 branchPtr(MacroAssembler::Equal, op1GPR, op2GPR, taken);
// Loose equality (==) between a speculated object (left) and a value that is
// either an object or null/undefined (right). Object-vs-object reduces to
// pointer equality; object-vs-null/undefined is always false. Emits
// MasqueradesAsUndefined flag checks when the watchpoint is invalid.
// NOTE(review): listing numbers jump (e.g. 1659 -> 1661), so some statements
// (including at least one branch target/link) are elided from this view.
1597 void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild)
1599 SpeculateCellOperand op1(this, leftChild);
1600 JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
1601 GPRTemporary result(this);
1603 GPRReg op1GPR = op1.gpr();
1604 GPRReg op2GPR = op2.gpr();
1605 GPRReg resultGPR = result.gpr();
1607 bool masqueradesAsUndefinedWatchpointValid =
1608 masqueradesAsUndefinedWatchpointIsStillValid();
1610 if (masqueradesAsUndefinedWatchpointValid) {
1612 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1615 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1616 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
1618 MacroAssembler::NonZero,
1619 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1620 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1623 // It seems that most of the time when programs do a == b where b may be either null/undefined
1624 // or an object, b is usually an object. Balance the branches to make that case fast.
1625 MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(JSValueRegs(op2GPR));
1627 // We know that within this branch, rightChild must be a cell.
1628 if (masqueradesAsUndefinedWatchpointValid) {
1630 JSValueRegs(op2GPR), rightChild, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(op2GPR));
1633 JSValueRegs(op2GPR), rightChild, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(op2GPR));
1634 speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
1636 MacroAssembler::NonZero,
1637 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1638 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1641 // At this point we know that we can perform a straight-forward equality comparison on pointer
1642 // values because both left and right are pointers to objects that have no special equality
1644 m_jit.compare64(MacroAssembler::Equal, op1GPR, op2GPR, resultGPR);
1645 MacroAssembler::Jump done = m_jit.jump();
1647 rightNotCell.link(&m_jit);
1649 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1650 // prove that it is either null or undefined.
// Masking off TagBitUndefined folds null and undefined to the same encoding
// (ValueNull); anything else fails the type check.
1651 if (needsTypeCheck(rightChild, SpecCellCheck | SpecOther)) {
1652 m_jit.move(op2GPR, resultGPR);
1653 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
1656 JSValueRegs(op2GPR), rightChild, SpecCellCheck | SpecOther,
1658 MacroAssembler::NotEqual, resultGPR,
1659 MacroAssembler::TrustedImm64(ValueNull)));
// Object == null/undefined is always false (0 before boxing below).
1661 m_jit.move(TrustedImm32(0), result.gpr());
1664 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
1665 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
// Fused compare+branch form of compileObjectToObjectOrOtherEquality: object
// vs (object | null/undefined), branching straight to taken/notTaken instead
// of producing a boolean. The null/undefined case always goes to notTaken.
// NOTE(review): listing numbers jump (e.g. 1734 -> 1740 at the end), so the
// final jump/close of this function is elided from this view.
1668 void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode)
1670 BasicBlock* taken = branchNode->branchData()->taken.block;
1671 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1673 SpeculateCellOperand op1(this, leftChild);
1674 JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
1675 GPRTemporary result(this);
1677 GPRReg op1GPR = op1.gpr();
1678 GPRReg op2GPR = op2.gpr();
1679 GPRReg resultGPR = result.gpr();
1681 bool masqueradesAsUndefinedWatchpointValid =
1682 masqueradesAsUndefinedWatchpointIsStillValid();
1684 if (masqueradesAsUndefinedWatchpointValid) {
1686 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1689 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1690 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
1692 MacroAssembler::NonZero,
1693 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1694 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1697 // It seems that most of the time when programs do a == b where b may be either null/undefined
1698 // or an object, b is usually an object. Balance the branches to make that case fast.
1699 MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(JSValueRegs(op2GPR));
1701 // We know that within this branch, rightChild must be a cell.
1702 if (masqueradesAsUndefinedWatchpointValid) {
1704 JSValueRegs(op2GPR), rightChild, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(op2GPR));
1707 JSValueRegs(op2GPR), rightChild, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(op2GPR));
1708 speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
1710 MacroAssembler::NonZero,
1711 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1712 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1715 // At this point we know that we can perform a straight-forward equality comparison on pointer
1716 // values because both left and right are pointers to objects that have no special equality
1718 branch64(MacroAssembler::Equal, op1GPR, op2GPR, taken);
1720 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1721 // prove that it is either null or undefined.
// If no type check is needed, the not-cell path can merge straight into the
// notTaken jump; otherwise it gets its own null/undefined verification below.
1722 if (!needsTypeCheck(rightChild, SpecCellCheck | SpecOther))
1723 rightNotCell.link(&m_jit);
1725 jump(notTaken, ForceJump);
1727 rightNotCell.link(&m_jit);
// Same TagBitUndefined masking trick as the non-peephole variant: null and
// undefined both compare equal to ValueNull after the mask.
1728 m_jit.move(op2GPR, resultGPR);
1729 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
1732 JSValueRegs(op2GPR), rightChild, SpecCellCheck | SpecOther, m_jit.branch64(
1733 MacroAssembler::NotEqual, resultGPR,
1734 MacroAssembler::TrustedImm64(ValueNull)));
// Strict equality between a speculated Symbol and an untyped JSValue. After
// speculateSymbol validates the left side, symbol identity equals pointer
// identity, so a raw 64-bit compare suffices. Produces an unblessed boolean
// (0/1, not boxed).
1740 void SpeculativeJIT::compileSymbolUntypedEquality(Node* node, Edge symbolEdge, Edge untypedEdge)
1742 SpeculateCellOperand symbol(this, symbolEdge);
1743 JSValueOperand untyped(this, untypedEdge);
1744 GPRTemporary result(this, Reuse, symbol, untyped);
1746 GPRReg symbolGPR = symbol.gpr();
1747 GPRReg untypedGPR = untyped.gpr();
1748 GPRReg resultGPR = result.gpr();
1750 speculateSymbol(symbolEdge, symbolGPR);
1752 // At this point we know that we can perform a straight-forward equality comparison on pointer
1753 // values because we are doing strict equality.
1754 m_jit.compare64(MacroAssembler::Equal, symbolGPR, untypedGPR, resultGPR);
1755 unblessedBooleanResult(resultGPR, node);
// Int32 relational comparison with three operand shapes: constant-vs-reg,
// reg-vs-constant, and reg-vs-reg. Each produces a 0/1 compare32 result,
// then boxes it into a JS boolean with or32(ValueFalse).
1758 void SpeculativeJIT::compileInt32Compare(Node* node, MacroAssembler::RelationalCondition condition)
// Constant on the left: feed the immediate first so the reg operand can be
// reused for the result.
1760 if (node->child1()->isInt32Constant()) {
1761 SpeculateInt32Operand op2(this, node->child2());
1762 GPRTemporary result(this, Reuse, op2);
1763 int32_t imm = node->child1()->asInt32();
1764 m_jit.compare32(condition, JITCompiler::Imm32(imm), op2.gpr(), result.gpr());
1766 // If we add a DataFormatBool, we should use it here.
1767 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1768 jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
// Constant on the right: mirror of the case above.
1769 } else if (node->child2()->isInt32Constant()) {
1770 SpeculateInt32Operand op1(this, node->child1());
1771 GPRTemporary result(this, Reuse, op1);
1772 int32_t imm = node->child2()->asInt32();
1773 m_jit.compare32(condition, op1.gpr(), JITCompiler::Imm32(imm), result.gpr());
1775 // If we add a DataFormatBool, we should use it here.
1776 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1777 jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
// General register-vs-register case.
1779 SpeculateInt32Operand op1(this, node->child1());
1780 SpeculateInt32Operand op2(this, node->child2());
1781 GPRTemporary result(this, Reuse, op1, op2);
1782 m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
1784 // If we add a DataFormatBool, we should use it here.
1785 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1786 jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
// Int52 relational comparison: fills both children in matching Int52
// representations (op2 matches whatever format op1 chose), does a 64-bit
// compare, and boxes the 0/1 result into a JS boolean.
1790 void SpeculativeJIT::compileInt52Compare(Node* node, MacroAssembler::RelationalCondition condition)
1792 SpeculateWhicheverInt52Operand op1(this, node->child1());
1793 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
1794 GPRTemporary result(this, Reuse, op1, op2);
1796 m_jit.compare64(condition, op1.gpr(), op2.gpr(), result.gpr());
1798 // If we add a DataFormatBool, we should use it here.
1799 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1800 jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
// Fused Int52 compare+branch. Inverts the condition and swaps the targets
// when `taken` is the next block so the hot path falls through.
// NOTE(review): listing numbers jump after line 1812 (the taken/notTaken
// swap) and after 1820; those statements are elided from this view.
1803 void SpeculativeJIT::compilePeepHoleInt52Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1805 BasicBlock* taken = branchNode->branchData()->taken.block;
1806 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1808 // The branch instruction will branch to the taken block.
1809 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1810 if (taken == nextBlock()) {
1811 condition = JITCompiler::invert(condition);
1812 BasicBlock* tmp = taken;
1817 SpeculateWhicheverInt52Operand op1(this, node->child1());
1818 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
1820 branch64(condition, op1.gpr(), op2.gpr(), taken);
// Double comparison: preloads the boxed `true` value, branches on the FP
// condition, and xors the low bit on the fall-through (false) path — since
// boxed true and false differ only in bit 0, xor with 1 turns true into
// false. Branch-taken keeps the preloaded true.
1824 void SpeculativeJIT::compileDoubleCompare(Node* node, MacroAssembler::DoubleCondition condition)
1826 SpeculateDoubleOperand op1(this, node->child1());
1827 SpeculateDoubleOperand op2(this, node->child2());
1828 GPRTemporary result(this);
1830 m_jit.move(TrustedImm32(ValueTrue), result.gpr());
1831 MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr());
1832 m_jit.xor64(TrustedImm32(true), result.gpr());
1833 trueCase.link(&m_jit);
1835 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
// Compares a JSValue against a specific known cell (node->cellOperand()),
// referenced via a weak pointer so the GC keeps it alive for this code.
// The result register doubles as the scratch holding the expected pointer.
1838 void SpeculativeJIT::compileCompareEqPtr(Node* node)
1840 JSValueOperand value(this, node->child1());
1841 GPRTemporary result(this);
1842 GPRReg valueGPR = value.gpr();
1843 GPRReg resultGPR = result.gpr();
1845 m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), node->cellOperand()->cell()), resultGPR);
1846 m_jit.compare64(MacroAssembler::Equal, valueGPR, resultGPR, resultGPR);
1847 unblessedBooleanResult(resultGPR, node);
// Logical NOT of a value speculated to be object-or-other (null/undefined).
// Objects yield boxed false; null/undefined yields boxed true. When the
// MasqueradesAsUndefined watchpoint is invalid, an extra structure/global-
// object check guards against objects that compare as undefined.
// NOTE(review): listing numbers jump (e.g. 1912 -> 1916), so some statements
// (including the `done` link) are elided from this view.
1850 void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
1852 JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
1853 GPRTemporary result(this);
1854 GPRReg valueGPR = value.gpr();
1855 GPRReg resultGPR = result.gpr();
// Declared unadopted so the registers are only allocated on the slow
// (watchpoint-invalid) configuration below.
1856 GPRTemporary structure;
1857 GPRReg structureGPR = InvalidGPRReg;
1858 GPRTemporary scratch;
1859 GPRReg scratchGPR = InvalidGPRReg;
1861 bool masqueradesAsUndefinedWatchpointValid =
1862 masqueradesAsUndefinedWatchpointIsStillValid();
1864 if (!masqueradesAsUndefinedWatchpointValid) {
1865 // The masquerades as undefined case will use the structure register, so allocate it here.
1866 // Do this at the top of the function to avoid branching around a register allocation.
1867 GPRTemporary realStructure(this);
1868 GPRTemporary realScratch(this);
1869 structure.adopt(realStructure);
1870 scratch.adopt(realScratch);
1871 structureGPR = structure.gpr();
1872 scratchGPR = scratch.gpr();
1875 MacroAssembler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
1876 if (masqueradesAsUndefinedWatchpointValid) {
1878 JSValueRegs(valueGPR), nodeUse, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(valueGPR));
1881 JSValueRegs(valueGPR), nodeUse, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(valueGPR));
// A MasqueradesAsUndefined object only acts undefined relative to a specific
// global object, hence the structure load and globalObject comparison below.
1883 MacroAssembler::Jump isNotMasqueradesAsUndefined =
1885 MacroAssembler::Zero,
1886 MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()),
1887 MacroAssembler::TrustedImm32(MasqueradesAsUndefined));
1889 m_jit.emitLoadStructure(*m_jit.vm(), valueGPR, structureGPR, scratchGPR);
1890 speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
1892 MacroAssembler::Equal,
1893 MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
1894 TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
1896 isNotMasqueradesAsUndefined.link(&m_jit);
// Object path: !object is false.
1898 m_jit.move(TrustedImm32(ValueFalse), resultGPR);
1899 MacroAssembler::Jump done = m_jit.jump();
1901 notCell.link(&m_jit);
// Not-cell path: verify null/undefined via the TagBitUndefined mask (both
// encodings collapse to ValueNull), then produce boxed true.
1903 if (needsTypeCheck(nodeUse, SpecCellCheck | SpecOther)) {
1904 m_jit.move(valueGPR, resultGPR);
1905 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
1907 JSValueRegs(valueGPR), nodeUse, SpecCellCheck | SpecOther, m_jit.branch64(
1908 MacroAssembler::NotEqual,
1910 MacroAssembler::TrustedImm64(ValueNull)));
1912 m_jit.move(TrustedImm32(ValueTrue), resultGPR);
1916 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
// Emits LogicalNot, dispatching on the proven/speculated type of child1.
// Every arm produces a boxed JS boolean (DataFormatJSBoolean) except the
// string arms, which delegate to dedicated helpers.
1919 void SpeculativeJIT::compileLogicalNot(Node* node)
1921 switch (node->child1().useKind()) {
1922 case ObjectOrOtherUse: {
1923 compileObjectOrOtherLogicalNot(node->child1());
// Int32 arm: !x is (x == 0); compare32 yields 0/1, then OR-ing in ValueFalse
// boxes the bit into the JSValue boolean encoding.
1928 SpeculateInt32Operand value(this, node->child1());
1929 GPRTemporary result(this, Reuse, value);
1930 m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), result.gpr());
1931 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1932 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1936 case DoubleRepUse: {
// Double arm: start with boxed false; if the double is zero (or NaN — i.e.
// the non-zero branch is not taken) flip the low bit to make boxed true.
1937 SpeculateDoubleOperand value(this, node->child1());
1938 FPRTemporary scratch(this);
1939 GPRTemporary result(this);
1940 m_jit.move(TrustedImm32(ValueFalse), result.gpr());
1941 MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr());
1942 m_jit.xor32(TrustedImm32(true), result.gpr());
1943 nonZero.link(&m_jit);
1944 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1949 case KnownBooleanUse: {
// Boolean arm, fast case: no type check needed, so just flip the payload bit
// of the boxed boolean (true^1 == false and vice versa).
1950 if (!needsTypeCheck(node->child1(), SpecBoolean)) {
1951 SpeculateBooleanOperand value(this, node->child1());
1952 GPRTemporary result(this, Reuse, value);
1954 m_jit.move(value.gpr(), result.gpr());
1955 m_jit.xor64(TrustedImm32(true), result.gpr());
1957 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
// Boolean arm, checked case: XOR with ValueFalse maps a well-formed boolean
// to 0/1; any bit outside the low one means the value was not a boolean and
// we must exit.  The final XOR with ValueTrue both re-boxes and negates.
1961 JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
1962 GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
1964 m_jit.move(value.gpr(), result.gpr());
1965 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
1967 JSValueRegs(value.gpr()), node->child1(), SpecBoolean, m_jit.branchTest64(
1968 JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1969 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.gpr());
1971 // If we add a DataFormatBool, we should use it here.
1972 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
// Untyped arm: fall back to the generic value-to-boolean conversion helper
// (negateResult = true turns it into logical-not), then box via ValueFalse.
1977 JSValueOperand arg1(this, node->child1());
1978 GPRTemporary result(this);
1980 GPRReg arg1GPR = arg1.gpr();
1981 GPRReg resultGPR = result.gpr();
1983 FPRTemporary valueFPR(this);
1984 FPRTemporary tempFPR(this);
// The scratch GPR is only required when objects may masquerade as undefined
// (the watchpoint has fired), so allocate it lazily.
1986 bool shouldCheckMasqueradesAsUndefined = !masqueradesAsUndefinedWatchpointIsStillValid();
1987 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
1988 std::optional<GPRTemporary> scratch;
1989 GPRReg scratchGPR = InvalidGPRReg;
1990 if (shouldCheckMasqueradesAsUndefined) {
1991 scratch.emplace(this);
1992 scratchGPR = scratch->gpr();
1994 bool negateResult = true;
1995 m_jit.emitConvertValueToBoolean(*m_jit.vm(), JSValueRegs(arg1GPR), resultGPR, scratchGPR, valueFPR.fpr(), tempFPR.fpr(), shouldCheckMasqueradesAsUndefined, globalObject, negateResult);
1996 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
1997 jsValueResult(resultGPR, node, DataFormatJSBoolean);
// String arms: !s is (s.length == 0); dedicated helpers handle these.
2001 return compileStringZeroLength(node);
2003 case StringOrOtherUse:
2004 return compileLogicalNotStringOrOther(node);
2007 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
// Emits a conditional branch for an ObjectOrOtherUse edge: objects are truthy
// (branch to `taken`), null/undefined are falsy (end up at `notTaken`).
// Mirrors compileObjectOrOtherLogicalNot, but produces control flow instead
// of a boolean value.
2012 void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
2014 JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
2015 GPRTemporary scratch(this);
// The structure register is only needed when the MasqueradesAsUndefined
// watchpoint has fired, so it starts unallocated.
2016 GPRTemporary structure;
2017 GPRReg valueGPR = value.gpr();
2018 GPRReg scratchGPR = scratch.gpr();
2019 GPRReg structureGPR = InvalidGPRReg;
2021 if (!masqueradesAsUndefinedWatchpointIsStillValid()) {
2022 GPRTemporary realStructure(this);
2023 structure.adopt(realStructure);
2024 structureGPR = structure.gpr();
// Cell path: both arms type-check that the cell is an object.
2027 MacroAssembler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
2028 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
2030 JSValueRegs(valueGPR), nodeUse, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(valueGPR));
2033 JSValueRegs(valueGPR), nodeUse, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(valueGPR));
// Watchpoint fired: fast-path objects whose type-info flags lack
// MasqueradesAsUndefined; otherwise load the Structure and exit if its
// global object matches the current code origin's.
2035 JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
2037 MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()),
2038 TrustedImm32(MasqueradesAsUndefined));
2040 m_jit.emitLoadStructure(*m_jit.vm(), valueGPR, structureGPR, scratchGPR);
2041 speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
2043 MacroAssembler::Equal,
2044 MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
2045 TrustedImmPtr::weakPointer(m_jit.graph(), m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
2047 isNotMasqueradesAsUndefined.link(&m_jit);
// Objects are truthy: unconditionally take the `taken` block.
2049 jump(taken, ForceJump);
// Non-cell path: speculate null/undefined.  Masking off TagBitUndefined folds
// undefined onto null so one compare against ValueNull covers both.
2051 notCell.link(&m_jit);
2053 if (needsTypeCheck(nodeUse, SpecCellCheck | SpecOther)) {
2054 m_jit.move(valueGPR, scratchGPR);
2055 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
2057 JSValueRegs(valueGPR), nodeUse, SpecCellCheck | SpecOther, m_jit.branch64(
2058 MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImm64(ValueNull)));
// Branch nodes produce no value; they only shape control flow.
2062 noResult(m_currentNode);
// Emits a DFG Branch node: evaluates child1's truthiness and jumps to the
// taken/notTaken successor blocks, dispatching on the operand's use kind.
2065 void SpeculativeJIT::emitBranch(Node* node)
2067 BasicBlock* taken = node->branchData()->taken.block;
2068 BasicBlock* notTaken = node->branchData()->notTaken.block;
2070 switch (node->child1().useKind()) {
2071 case ObjectOrOtherUse: {
2072 emitObjectOrOtherBranch(node->child1(), taken, notTaken);
2077 case DoubleRepUse: {
2078 if (node->child1().useKind() == Int32Use) {
// Int32: truthy iff non-zero.  If `taken` is the fall-through block, invert
// the condition and swap successors so we can fall through instead of
// emitting a jump to the next block.
2079 bool invert = false;
2081 if (taken == nextBlock()) {
2083 BasicBlock* tmp = taken;
2088 SpeculateInt32Operand value(this, node->child1());
2089 branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken);
// Double: truthy iff non-zero (NaN and ±0 fall through as falsy here).
2091 SpeculateDoubleOperand value(this, node->child1());
2092 FPRTemporary scratch(this);
2093 branchDoubleNonZero(value.fpr(), scratch.fpr(), taken);
2103 emitStringBranch(node->child1(), taken, notTaken);
2107 case StringOrOtherUse: {
2108 emitStringOrOtherBranch(node->child1(), taken, notTaken);
2114 case KnownBooleanUse: {
2115 JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
2116 GPRReg valueGPR = value.gpr();
2118 if (node->child1().useKind() == BooleanUse || node->child1().useKind() == KnownBooleanUse) {
// Boolean fast case: test the payload bit directly, using the same
// swap-successors trick as the Int32 path to favor fall-through.
2119 if (!needsTypeCheck(node->child1(), SpecBoolean)) {
2120 MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
2122 if (taken == nextBlock()) {
2123 condition = MacroAssembler::Zero;
2124 BasicBlock* tmp = taken;
2129 branchTest32(condition, valueGPR, TrustedImm32(true), taken);
// Boolean checked case: compare against the encoded false/true values; any
// other bit pattern fails the type check and exits.
2132 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
2133 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
2135 typeCheck(JSValueRegs(valueGPR), node->child1(), SpecBoolean, m_jit.jump());
// Untyped path: allocate conversion temporaries up front (scratch only when
// MasqueradesAsUndefined objects are possible).
2139 GPRTemporary result(this);
2140 FPRTemporary fprValue(this);
2141 FPRTemporary fprTemp(this);
2142 std::optional<GPRTemporary> scratch;
2144 GPRReg scratchGPR = InvalidGPRReg;
2145 bool shouldCheckMasqueradesAsUndefined = !masqueradesAsUndefinedWatchpointIsStillValid();
2146 if (shouldCheckMasqueradesAsUndefined) {
2147 scratch.emplace(this);
2148 scratchGPR = scratch->gpr();
2151 GPRReg resultGPR = result.gpr();
2152 FPRReg valueFPR = fprValue.fpr();
2153 FPRReg tempFPR = fprTemp.fpr();
// Profile-guided fast paths: if the value has been an int32, 0 is falsy and
// any other boxed int32 (>= tagTypeNumberRegister) is truthy; if it has been
// a boolean, compare against the two encoded boolean constants.
2155 if (node->child1()->prediction() & SpecInt32Only) {
2156 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(0))), notTaken);
2157 branch64(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister, taken);
2160 if (node->child1()->prediction() & SpecBoolean) {
2161 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
2162 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
// Generic slow path: convert the value to a boolean and branch on the result.
2167 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
2168 m_jit.emitConvertValueToBoolean(*m_jit.vm(), JSValueRegs(valueGPR), resultGPR, scratchGPR, valueFPR, tempFPR, shouldCheckMasqueradesAsUndefined, globalObject);
2170 branchTest32(MacroAssembler::NonZero, resultGPR, taken);
// Children were consumed explicitly along the branch paths above.
2174 noResult(node, UseChildrenCalledExplicitly);
2179 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad use kind");
2183 void SpeculativeJIT::compile(Node* node)
2185 NodeType op = node->op();
2187 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
2188 m_jit.clearRegisterAllocationOffsets();
2193 case DoubleConstant:
2195 case PhantomDirectArguments:
2196 case PhantomClonedArguments:
2197 initConstantInfo(node);
2200 case LazyJSConstant:
2201 compileLazyJSConstant(node);
2205 speculate(node, node->child1());
2206 switch (node->child1().useKind()) {
2208 case DoubleRepRealUse:
2209 case DoubleRepAnyIntUse: {
2210 SpeculateDoubleOperand op(this, node->child1());
2211 FPRTemporary scratch(this, op);
2212 m_jit.moveDouble(op.fpr(), scratch.fpr());
2213 doubleResult(scratch.fpr(), node);
2217 SpeculateInt52Operand op(this, node->child1());
2218 GPRTemporary result(this, Reuse, op);
2219 m_jit.move(op.gpr(), result.gpr());
2220 int52Result(result.gpr(), node);
2224 JSValueOperand op(this, node->child1(), ManualOperandSpeculation);
2225 GPRTemporary result(this, Reuse, op);
2226 m_jit.move(op.gpr(), result.gpr());
2227 jsValueResult(result.gpr(), node);
2235 AbstractValue& value = m_state.variables().operand(node->local());
2237 // If the CFA is tracking this variable and it found that the variable
2238 // cannot have been assigned, then don't attempt to proceed.
2239 if (value.isClear()) {
2240 m_compileOkay = false;
2244 switch (node->variableAccessData()->flushFormat()) {
2245 case FlushedDouble: {
2246 FPRTemporary result(this);
2247 m_jit.loadDouble(JITCompiler::addressFor(node->machineLocal()), result.fpr());
2248 VirtualRegister virtualRegister = node->virtualRegister();
2249 m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble);
2250 generationInfoFromVirtualRegister(virtualRegister).initDouble(node, node->refCount(), result.fpr());
2254 case FlushedInt32: {
2255 GPRTemporary result(this);
2256 m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
2258 // Like int32Result, but don't useChildren - our children are phi nodes,
2259 // and don't represent values within this dataflow with virtual registers.
2260 VirtualRegister virtualRegister = node->virtualRegister();
2261 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger);
2262 generationInfoFromVirtualRegister(virtualRegister).initInt32(node, node->refCount(), result.gpr());
2266 case FlushedInt52: {
2267 GPRTemporary result(this);
2268 m_jit.load64(JITCompiler::addressFor(node->machineLocal()), result.gpr());
2270 VirtualRegister virtualRegister = node->virtualRegister();
2271 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
2272 generationInfoFromVirtualRegister(virtualRegister).initInt52(node, node->refCount(), result.gpr());
2277 GPRTemporary result(this);
2278 m_jit.load64(JITCompiler::addressFor(node->machineLocal()), result.gpr());
2280 // Like jsValueResult, but don't useChildren - our children are phi nodes,
2281 // and don't represent values within this dataflow with virtual registers.
2282 VirtualRegister virtualRegister = node->virtualRegister();
2283 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
2286 if (isCellSpeculation(value.m_type))
2287 format = DataFormatJSCell;
2288 else if (isBooleanSpeculation(value.m_type))
2289 format = DataFormatJSBoolean;
2291 format = DataFormatJS;
2293 generationInfoFromVirtualRegister(virtualRegister).initJSValue(node, node->refCount(), result.gpr(), format);
2299 case GetLocalUnlinked: {
2300 GPRTemporary result(this);
2302 m_jit.load64(JITCompiler::addressFor(node->unlinkedMachineLocal()), result.gpr());
2304 jsValueResult(result.gpr(), node);
2309 compileMovHint(m_currentNode);
2315 recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
2326 switch (node->variableAccessData()->flushFormat()) {
2327 case FlushedDouble: {
2328 SpeculateDoubleOperand value(this, node->child1());
2329 m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->machineLocal()));
2331 // Indicate that it's no longer necessary to retrieve the value of
2332 // this bytecode variable from registers or other locations in the stack,
2333 // but that it is stored as a double.
2334 recordSetLocal(DataFormatDouble);
2338 case FlushedInt32: {
2339 SpeculateInt32Operand value(this, node->child1());
2340 m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal()));
2342 recordSetLocal(DataFormatInt32);
2346 case FlushedInt52: {
2347 SpeculateInt52Operand value(this, node->child1());
2348 m_jit.store64(value.gpr(), JITCompiler::addressFor(node->machineLocal()));
2350 recordSetLocal(DataFormatInt52);
2355 SpeculateCellOperand cell(this, node->child1());
2356 GPRReg cellGPR = cell.gpr();
2357 m_jit.store64(cellGPR, JITCompiler::addressFor(node->machineLocal()));
2359 recordSetLocal(DataFormatCell);
2363 case FlushedBoolean: {
2364 SpeculateBooleanOperand boolean(this, node->child1());
2365 m_jit.store64(boolean.gpr(), JITCompiler::addressFor(node->machineLocal()));
2367 recordSetLocal(DataFormatBoolean);
2371 case FlushedJSValue: {
2372 JSValueOperand value(this, node->child1());
2373 m_jit.store64(value.gpr(), JITCompiler::addressFor(node->machineLocal()));
2375 recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
2380 DFG_CRASH(m_jit.graph(), node, "Bad flush format");
2388 // This is a no-op; it just marks the fact that the argument is being used.
2389 // But it may be profitable to use this as a hook to run speculation checks
2390 // on arguments, thereby allowing us to trivially eliminate such checks if
2391 // the argument is not used.
2392 recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
2398 compileBitwiseOp(node);
2404 compileShiftOp(node);
2407 case UInt32ToNumber: {
2408 compileUInt32ToNumber(node);
2412 case DoubleAsInt32: {
2413 compileDoubleAsInt32(node);
2417 case ValueToInt32: {
2418 compileValueToInt32(node);
2423 compileDoubleRep(node);
2428 compileValueRep(node);
2433 switch (node->child1().useKind()) {
2435 SpeculateInt32Operand operand(this, node->child1());
2436 GPRTemporary result(this, Reuse, operand);
2438 m_jit.signExtend32ToPtr(operand.gpr(), result.gpr());
2440 strictInt52Result(result.gpr(), node);
2445 GPRTemporary result(this);
2446 GPRReg resultGPR = result.gpr();
2448 convertAnyInt(node->child1(), resultGPR);
2450 strictInt52Result(resultGPR, node);
2454 case DoubleRepAnyIntUse: {
2455 SpeculateDoubleOperand value(this, node->child1());
2456 FPRReg valueFPR = value.fpr();
2458 GPRFlushedCallResult result(this);
2459 GPRReg resultGPR = result.gpr();
2463 callOperation(operationConvertDoubleToInt52, resultGPR, valueFPR);
2465 DFG_TYPE_CHECK_WITH_EXIT_KIND(Int52Overflow,
2466 JSValueRegs(), node->child1(), SpecAnyIntAsDouble,
2468 JITCompiler::Equal, resultGPR,
2469 JITCompiler::TrustedImm64(JSValue::notInt52)));
2471 strictInt52Result(resultGPR, node);
2476 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
2482 compileValueAdd(node);
2486 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2487 JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
2488 JSValueOperand op3(this, node->child3(), ManualOperandSpeculation);
2490 GPRReg op1GPR = op1.gpr();
2491 GPRReg op2GPR = op2.gpr();
2496 op3GPR = InvalidGPRReg;
2500 GPRFlushedCallResult result(this);
2502 callOperation(operationStrCat3, result.gpr(), op1GPR, op2GPR, op3GPR);
2504 callOperation(operationStrCat2, result.gpr(), op1GPR, op2GPR);
2505 m_jit.exceptionCheck();
2507 cellResult(result.gpr(), node);
2512 compileArithAdd(node);
2516 compileArithClz32(node);
2520 compileMakeRope(node);
2524 compileArithSub(node);
2528 compileArithNegate(node);
2532 compileArithMul(node);
2536 compileArithDiv(node);
2541 compileArithMod(node);
2546 compileArithAbs(node);
2551 switch (node->binaryUseKind()) {
2553 SpeculateStrictInt32Operand op1(this, node->child1());
2554 SpeculateStrictInt32Operand op2(this, node->child2());
2555 GPRTemporary result(this, Reuse, op1);
2557 MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1.gpr(), op2.gpr());
2558 m_jit.move(op2.gpr(), result.gpr());
2559 if (op1.gpr() != result.gpr()) {
2560 MacroAssembler::Jump done = m_jit.jump();
2561 op1Less.link(&m_jit);
2562 m_jit.move(op1.gpr(), result.gpr());
2565 op1Less.link(&m_jit);
2567 int32Result(result.gpr(), node);
2571 case DoubleRepUse: {
2572 SpeculateDoubleOperand op1(this, node->child1());
2573 SpeculateDoubleOperand op2(this, node->child2());
2574 FPRTemporary result(this, op1);
2576 FPRReg op1FPR = op1.fpr();
2577 FPRReg op2FPR = op2.fpr();
2578 FPRReg resultFPR = result.fpr();
2580 MacroAssembler::JumpList done;
2582 MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1FPR, op2FPR);
2584 // op2 is eather the lesser one or one of then is NaN
2585 MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1FPR, op2FPR);
2587 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2588 // op1 + op2 and putting it into result.
2589 m_jit.addDouble(op1FPR, op2FPR, resultFPR);
2590 done.append(m_jit.jump());
2592 op2Less.link(&m_jit);
2593 m_jit.moveDouble(op2FPR, resultFPR);
2595 if (op1FPR != resultFPR) {
2596 done.append(m_jit.jump());
2598 op1Less.link(&m_jit);
2599 m_jit.moveDouble(op1FPR, resultFPR);
2601 op1Less.link(&m_jit);
2605 doubleResult(resultFPR, node);
2610 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
2617 compileArithPow(node);
2621 compileArithSqrt(node);
2625 compileArithFRound(node);
2629 compileArithRandom(node);
2636 compileArithRounding(node);
2640 compileArithUnary(node);
2644 compileLogicalNot(node);
2648 if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess))
2653 if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq))
2657 case CompareGreater:
2658 if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater))
2662 case CompareGreaterEq:
2663 if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq))
2668 compileCompareUnsigned(node, JITCompiler::Below);
2671 case CompareBelowEq:
2672 compileCompareUnsigned(node, JITCompiler::BelowOrEqual);
2676 if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq))
2680 case CompareStrictEq:
2681 if (compileStrictEq(node))
2686 compileCompareEqPtr(node);
2689 case StringCharCodeAt: {
2690 compileGetCharCodeAt(node);
2694 case StringCharAt: {
2695 // Relies on StringCharAt node having same basic layout as GetByVal
2696 compileGetByValOnString(node);
2700 case StringFromCharCode: {
2701 compileFromCharCode(node);
2711 case ArrayifyToStructure: {
2717 switch (node->arrayMode().type()) {
2718 case Array::SelectUsingPredictions:
2719 case Array::ForceExit:
2720 DFG_CRASH(m_jit.graph(), node, "Bad array mode type");
2722 case Array::Undecided: {
2723 SpeculateStrictInt32Operand index(this, node->child2());
2724 GPRTemporary result(this, Reuse, index);
2725 GPRReg indexGPR = index.gpr();
2726 GPRReg resultGPR = result.gpr();
2728 speculationCheck(OutOfBounds, JSValueRegs(), node,
2729 m_jit.branch32(MacroAssembler::LessThan, indexGPR, MacroAssembler::TrustedImm32(0)));
2731 use(node->child1());
2734 m_jit.move(MacroAssembler::TrustedImm64(ValueUndefined), resultGPR);
2735 jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
2738 case Array::Generic: {
2739 if (node->child1().useKind() == ObjectUse) {
2740 if (node->child2().useKind() == StringUse) {
2741 compileGetByValForObjectWithString(node);
2745 if (node->child2().useKind() == SymbolUse) {
2746 compileGetByValForObjectWithSymbol(node);
2750 JSValueOperand base(this, node->child1());
2751 JSValueOperand property(this, node->child2());
2752 GPRReg baseGPR = base.gpr();
2753 GPRReg propertyGPR = property.gpr();
2756 GPRFlushedCallResult result(this);
2757 callOperation(operationGetByVal, result.gpr(), baseGPR, propertyGPR);
2758 m_jit.exceptionCheck();
2760 jsValueResult(result.gpr(), node);
2764 case Array::Contiguous: {
2765 if (node->arrayMode().isInBounds()) {
2766 SpeculateStrictInt32Operand property(this, node->child2());
2767 StorageOperand storage(this, node->child3());
2769 GPRReg propertyReg = property.gpr();
2770 GPRReg storageReg = storage.gpr();
2775 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2777 GPRTemporary result(this);
2778 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.gpr());
2779 if (node->arrayMode().isSaneChain()) {
2780 ASSERT(node->arrayMode().type() == Array::Contiguous);
2781 JITCompiler::Jump notHole = m_jit.branchTest64(
2782 MacroAssembler::NonZero, result.gpr());
2783 m_jit.move(TrustedImm64(JSValue::encode(jsUndefined())), result.gpr());
2784 notHole.link(&m_jit);
2787 LoadFromHole, JSValueRegs(), 0,
2788 m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
2790 jsValueResult(result.gpr(), node, node->arrayMode().type() == Array::Int32 ? DataFormatJSInt32 : DataFormatJS);
2794 SpeculateCellOperand base(this, node->child1());
2795 SpeculateStrictInt32Operand property(this, node->child2());
2796 StorageOperand storage(this, node->child3());
2798 GPRReg baseReg = base.gpr();
2799 GPRReg propertyReg = property.gpr();
2800 GPRReg storageReg = storage.gpr();
2805 GPRTemporary result(this);
2806 GPRReg resultReg = result.gpr();
2808 MacroAssembler::JumpList slowCases;
2810 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2812 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2813 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg));
2815 addSlowPathGenerator(
2817 slowCases, this, operationGetByValObjectInt,
2818 result.gpr(), baseReg, propertyReg));
2820 jsValueResult(resultReg, node);
2824 case Array::Double: {
2825 if (node->arrayMode().isInBounds()) {
2826 SpeculateStrictInt32Operand property(this, node->child2());
2827 StorageOperand storage(this, node->child3());
2829 GPRReg propertyReg = property.gpr();
2830 GPRReg storageReg = storage.gpr();
2835 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2837 FPRTemporary result(this);
2838 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.fpr());
2839 if (!node->arrayMode().isSaneChain())
2840 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, result.fpr(), result.fpr()));
2841 doubleResult(result.fpr(), node);
2845 SpeculateCellOperand base(this, node->child1());
2846 SpeculateStrictInt32Operand property(this, node->child2());
2847 StorageOperand storage(this, node->child3());
2849 GPRReg baseReg = base.gpr();
2850 GPRReg propertyReg = property.gpr();
2851 GPRReg storageReg = storage.gpr();
2856 GPRTemporary result(this);
2857 FPRTemporary temp(this);
2858 GPRReg resultReg = result.gpr();
2859 FPRReg tempReg = temp.fpr();
2861 MacroAssembler::JumpList slowCases;
2863 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2865 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), tempReg);
2866 slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempReg, tempReg));
2867 boxDouble(tempReg, resultReg);
2869 addSlowPathGenerator(
2871 slowCases, this, operationGetByValObjectInt,
2872 result.gpr(), baseReg, propertyReg));
2874 jsValueResult(resultReg, node);
2878 case Array::ArrayStorage:
2879 case Array::SlowPutArrayStorage: {
2880 if (node->arrayMode().isInBounds()) {
2881 SpeculateStrictInt32Operand property(this, node->child2());
2882 StorageOperand storage(this, node->child3());
2884 GPRReg propertyReg = property.gpr();
2885 GPRReg storageReg = storage.gpr();
2890 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
2892 GPRTemporary result(this);
2893 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()), result.gpr());
2894 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
2896 jsValueResult(result.gpr(), node);
2900 SpeculateCellOperand base(this, node->child1());
2901 SpeculateStrictInt32Operand property(this, node->child2());
2902 StorageOperand storage(this, node->child3());
2904 GPRReg baseReg = base.gpr();
2905 GPRReg propertyReg = property.gpr();
2906 GPRReg storageReg = storage.gpr();
2911 GPRTemporary result(this);
2912 GPRReg resultReg = result.gpr();
2914 MacroAssembler::JumpList slowCases;
2916 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
2918 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()), resultReg);
2919 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg));
2921 addSlowPathGenerator(
2923 slowCases, this, operationGetByValObjectInt,
2924 result.gpr(), baseReg, propertyReg));
2926 jsValueResult(resultReg, node);
2930 compileGetByValOnString(node);
2932 case Array::DirectArguments:
2933 compileGetByValOnDirectArguments(node);
2935 case Array::ScopedArguments:
2936 compileGetByValOnScopedArguments(node);
2939 TypedArrayType type = node->arrayMode().typedArrayType();
2941 compileGetByValOnIntTypedArray(node, type);
2943 compileGetByValOnFloatTypedArray(node, type);
2948 case GetByValWithThis: {
2949 JSValueOperand base(this, node->child1());
2950 GPRReg baseGPR = base.gpr();
2951 JSValueOperand thisValue(this, node->child2());
2952 GPRReg thisValueGPR = thisValue.gpr();
2953 JSValueOperand subscript(this, node->child3());
2954 GPRReg subscriptGPR = subscript.gpr();
2956 GPRFlushedCallResult result(this);
2957 GPRReg resultGPR = result.gpr();
2960 callOperation(operationGetByValWithThis, resultGPR, baseGPR, thisValueGPR, subscriptGPR);
2961 m_jit.exceptionCheck();
2963 jsValueResult(resultGPR, node);
2967 case PutByValDirect:
2969 case PutByValAlias: {
2970 Edge child1 = m_jit.graph().varArgChild(node, 0);
2971 Edge child2 = m_jit.graph().varArgChild(node, 1);
2972 Edge child3 = m_jit.graph().varArgChild(node, 2);
2973 Edge child4 = m_jit.graph().varArgChild(node, 3);
2975 ArrayMode arrayMode = node->arrayMode().modeForPut();
2976 bool alreadyHandled = false;
2978 switch (arrayMode.type()) {
2979 case Array::SelectUsingPredictions:
2980 case Array::ForceExit:
2981 DFG_CRASH(m_jit.graph(), node, "Bad array mode type");
2983 case Array::Generic: {
2984 DFG_ASSERT(m_jit.graph(), node, node->op() == PutByVal || node->op() == PutByValDirect);
2986 if (child1.useKind() == CellUse) {
2987 if (child2.useKind() == StringUse) {
2988 compilePutByValForCellWithString(node, child1, child2, child3);
2989 alreadyHandled = true;
2993 if (child2.useKind() == SymbolUse) {
2994 compilePutByValForCellWithSymbol(node, child1, child2, child3);
2995 alreadyHandled = true;
3000 JSValueOperand arg1(this, child1);
3001 JSValueOperand arg2(this, child2);
3002 JSValueOperand arg3(this, child3);
3003 GPRReg arg1GPR = arg1.gpr();
3004 GPRReg arg2GPR = arg2.gpr();
3005 GPRReg arg3GPR = arg3.gpr();
3007 if (node->op() == PutByValDirect)
3008 callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict, arg1GPR, arg2GPR, arg3GPR);
3010 callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR);
3011 m_jit.exceptionCheck();
3014 alreadyHandled = true;
3024 // FIXME: the base may not be necessary for some array access modes. But we have to
3025 // keep it alive to this point, so it's likely to be in a register anyway. Likely
3026 // no harm in locking it here.
3027 SpeculateCellOperand base(this, child1);
3028 SpeculateStrictInt32Operand property(this, child2);
3030 GPRReg baseReg = base.gpr();
3031 GPRReg propertyReg = property.gpr();
3033 switch (arrayMode.type()) {
// PutByVal on Int32/Contiguous butterfly storage: the value is stored as a
// boxed 64-bit JSValue at storage[property * 8].
// NOTE(review): this excerpt elides some original lines (gaps in the embedded
// numbering), so several statements below are continuations of calls whose
// opening lines are not visible here.
3035 case Array::Contiguous: {
3036 JSValueOperand value(this, child3, ManualOperandSpeculation);
3038 GPRReg valueReg = value.gpr();
// For Int32 arrays, speculate that the incoming boxed value is an int32:
// on 64-bit, a value below tagTypeNumberRegister is not an int32-encoded
// JSValue, so Below is the failure condition of this type check.
3043 if (arrayMode.type() == Array::Int32) {
3045 JSValueRegs(valueReg), child3, SpecInt32Only,
3047 MacroAssembler::Below, valueReg, GPRInfo::tagTypeNumberRegister));
3050 StorageOperand storage(this, child4);
3051 GPRReg storageReg = storage.gpr();
// PutByValAlias: an earlier access already established bounds, so store
// directly with no length or hole checks.
3053 if (node->op() == PutByValAlias) {
3054 // Store the value to the array.
3055 GPRReg propertyReg = property.gpr();
3056 GPRReg valueReg = value.gpr();
3057 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
3063 GPRTemporary temporary;
3064 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
3066 MacroAssembler::Jump slowCase;
// In-bounds mode: index >= publicLength is an OutOfBounds speculation
// failure (OSR exit), so no growth path is emitted.
3068 if (arrayMode.isInBounds()) {
3070 OutOfBounds, JSValueRegs(), 0,
3071 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
// Otherwise: index < publicLength stores directly; index >= vectorLength
// goes to the slow path; in between (a write just past the end but still
// within capacity) we grow publicLength to property + 1 before storing.
3073 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
3075 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
// If out-of-bounds stores are not expected, the beyond-capacity case is a
// speculation failure rather than a slow-path call.
3077 if (!arrayMode.isOutOfBounds())
3078 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
// Bump publicLength to cover the new index (temporaryReg = property + 1).
3080 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
3081 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
3083 inBounds.link(&m_jit);
3086 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
// Beyond-capacity slow path: call out to the runtime. PutByValDirect (e.g.
// from object literals / defineProperty-style stores) uses the Direct
// operations; strict vs. sloppy mode selects the throwing variant.
3093 if (arrayMode.isOutOfBounds()) {
3094 if (node->op() == PutByValDirect) {
3095 addSlowPathGenerator(slowPathCall(
3097 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict,
3098 NoResult, baseReg, propertyReg, valueReg));
3100 addSlowPathGenerator(slowPathCall(
3102 m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
3103 NoResult, baseReg, propertyReg, valueReg));
// Operands were consumed manually above, so tell the framework not to
// re-use the children.
3107 noResult(node, UseChildrenCalledExplicitly);
// Double arrays have their own helper (unboxed double storage).
3111 case Array::Double: {
3112 compileDoublePutByVal(node, base, property);
3116 case Array::ArrayStorage:
// SlowPutArrayStorage shares this lowering; it differs only in hole
// handling below (any hole write takes the slow path).
3117 case Array::SlowPutArrayStorage: {
3118 JSValueOperand value(this, child3);
3120 GPRReg valueReg = value.gpr();
3125 StorageOperand storage(this, child4);
3126 GPRReg storageReg = storage.gpr();
// PutByValAlias: bounds already proven, store straight into the vector.
3128 if (node->op() == PutByValAlias) {
3129 // Store the value to the array.
3130 GPRReg propertyReg = property.gpr();
3131 GPRReg valueReg = value.gpr();
3132 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()));
3138 GPRTemporary temporary;
3139 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
3141 MacroAssembler::JumpList slowCases;
// ArrayStorage has no append-within-capacity fast path for growth past
// vectorLength: anything at or beyond capacity is either a speculation
// failure (in-bounds mode) or a runtime call.
3143 MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
3144 if (!arrayMode.isOutOfBounds())
3145 speculationCheck(OutOfBounds, JSValueRegs(), 0, beyondArrayBounds);
3147 slowCases.append(beyondArrayBounds);
3149 // Check if we're writing to a hole; if so increment m_numValuesInVector.
// The Zero test works because a hole (empty JSValue) is the all-zero bit
// pattern in the 64-bit encoding. In in-bounds mode a hole store is a
// StoreToHole speculation failure.
3150 if (arrayMode.isInBounds()) {
3152 StoreToHole, JSValueRegs(), 0,
3153 m_jit.branchTest64(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset())));
3155 MacroAssembler::Jump notHoleValue = m_jit.branchTest64(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()));
3156 if (arrayMode.isSlowPut()) {
3157 // This is sort of strange. If we wanted to optimize this code path, we would invert
3158 // the above branch. But it's simply not worth it since this only happens if we're
3159 // already having a bad time.
3160 slowCases.append(m_jit.jump());
// Filling a hole: keep the occupancy count in sync.
3162 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset()));
3164 // If we're writing to a hole we might be growing the array;
// ...so update length to property + 1 when the index is at/past it.
3165 MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
3166 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
3167 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
3169 lengthDoesNotNeedUpdate.link(&m_jit);
3171 notHoleValue.link(&m_jit);
3174 // Store the value to the array.
3175 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()));
// Slow path mirrors the Contiguous case: Direct vs. plain PutByVal, and
// strict vs. sloppy mode pick the runtime operation. (NOTE(review): the
// non-Direct branch of this call is beyond the visible excerpt.)
3182 if (!slowCases.empty()) {
3183 if (node->op() == PutByValDirect) {
3184 addSlowPathGenerator(slowPathCall(
3186 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict,
3187 NoResult, baseReg, propertyReg, valueReg));