2 * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "DFGSpeculativeJIT.h"
31 #include "ArrayPrototype.h"
32 #include "CallFrameShuffler.h"
33 #include "DFGAbstractInterpreterInlines.h"
34 #include "DFGCallArrayAllocatorSlowPathGenerator.h"
35 #include "DFGOperations.h"
36 #include "DFGSlowPathGenerator.h"
37 #include "DirectArguments.h"
38 #include "GetterSetter.h"
39 #include "HasOwnPropertyCache.h"
40 #include "JSCInlines.h"
41 #include "JSEnvironmentRecord.h"
42 #include "JSLexicalEnvironment.h"
44 #include "JSPropertyNameEnumerator.h"
46 #include "ObjectPrototype.h"
47 #include "SetupVarargsFrame.h"
48 #include "SpillRegistersMode.h"
49 #include "StringPrototype.h"
50 #include "TypeProfilerLog.h"
53 namespace JSC { namespace DFG {
57 void SpeculativeJIT::boxInt52(GPRReg sourceGPR, GPRReg targetGPR, DataFormat format)
60 if (sourceGPR == targetGPR)
65 FPRReg fpr = fprAllocate();
67 if (format == DataFormatInt52)
68 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), sourceGPR);
70 ASSERT(format == DataFormatStrictInt52);
72 m_jit.boxInt52(sourceGPR, targetGPR, tempGPR, fpr);
74 if (format == DataFormatInt52 && sourceGPR != targetGPR)
75 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), sourceGPR);
77 if (tempGPR != targetGPR)
83 GPRReg SpeculativeJIT::fillJSValue(Edge edge)
85 VirtualRegister virtualRegister = edge->virtualRegister();
86 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
88 switch (info.registerFormat()) {
89 case DataFormatNone: {
90 GPRReg gpr = allocate();
92 if (edge->hasConstant()) {
93 JSValue jsValue = edge->asJSValue();
94 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
95 info.fillJSValue(*m_stream, gpr, DataFormatJS);
96 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
98 DataFormat spillFormat = info.spillFormat();
99 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
100 switch (spillFormat) {
101 case DataFormatInt32: {
102 m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
103 m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
104 spillFormat = DataFormatJSInt32;
109 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
110 DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat & DataFormatJS);
113 info.fillJSValue(*m_stream, gpr, spillFormat);
118 case DataFormatInt32: {
119 GPRReg gpr = info.gpr();
120 // If the register has already been locked we need to take a copy.
121 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
122 if (m_gprs.isLocked(gpr)) {
123 GPRReg result = allocate();
124 m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr, result);
128 m_jit.or64(GPRInfo::tagTypeNumberRegister, gpr);
129 info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
134 // No retag required on JSVALUE64!
136 case DataFormatJSInt32:
137 case DataFormatJSDouble:
138 case DataFormatJSCell:
139 case DataFormatJSBoolean: {
140 GPRReg gpr = info.gpr();
145 case DataFormatBoolean:
146 case DataFormatStorage:
147 case DataFormatDouble:
148 case DataFormatInt52:
149 // this type currently never occurs
150 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
153 DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
154 return InvalidGPRReg;
158 void SpeculativeJIT::cachedGetById(CodeOrigin origin, JSValueRegs base, JSValueRegs result, unsigned identifierNumber, JITCompiler::Jump slowPathTarget , SpillRegistersMode mode, AccessType type)
160 cachedGetById(origin, base.gpr(), result.gpr(), identifierNumber, slowPathTarget, mode, type);
163 void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode, AccessType type)
165 CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
166 RegisterSet usedRegisters = this->usedRegisters();
167 if (spillMode == DontSpill) {
168 // We've already flushed registers to the stack, we don't need to spill these.
169 usedRegisters.set(baseGPR, false);
170 usedRegisters.set(resultGPR, false);
172 JITGetByIdGenerator gen(
173 m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber),
174 JSValueRegs(baseGPR), JSValueRegs(resultGPR), type);
175 gen.generateFastPath(m_jit);
177 JITCompiler::JumpList slowCases;
178 if (slowPathTarget.isSet())
179 slowCases.append(slowPathTarget);
180 slowCases.append(gen.slowPathJump());
182 auto slowPath = slowPathCall(
183 slowCases, this, type == AccessType::Get ? operationGetByIdOptimize : operationTryGetByIdOptimize,
184 spillMode, ExceptionCheckRequirement::CheckNeeded,
185 resultGPR, gen.stubInfo(), baseGPR, identifierUID(identifierNumber));
187 m_jit.addGetById(gen, slowPath.get());
188 addSlowPathGenerator(WTFMove(slowPath));
191 void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
193 CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
194 RegisterSet usedRegisters = this->usedRegisters();
195 if (spillMode == DontSpill) {
196 // We've already flushed registers to the stack, we don't need to spill these.
197 usedRegisters.set(baseGPR, false);
198 usedRegisters.set(valueGPR, false);
201 JITPutByIdGenerator gen(
202 m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, JSValueRegs(baseGPR),
203 JSValueRegs(valueGPR), scratchGPR, m_jit.ecmaModeFor(codeOrigin), putKind);
205 gen.generateFastPath(m_jit);
207 JITCompiler::JumpList slowCases;
208 if (slowPathTarget.isSet())
209 slowCases.append(slowPathTarget);
210 slowCases.append(gen.slowPathJump());
212 auto slowPath = slowPathCall(
213 slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), valueGPR, baseGPR,
214 identifierUID(identifierNumber));
216 m_jit.addPutById(gen, slowPath.get());
217 addSlowPathGenerator(WTFMove(slowPath));
220 void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand)
222 ASSERT_WITH_MESSAGE(!masqueradesAsUndefinedWatchpointIsStillValid() || !isKnownCell(operand.node()), "The Compare should have been eliminated, it is known to be always false.");
224 JSValueOperand arg(this, operand, ManualOperandSpeculation);
225 GPRReg argGPR = arg.gpr();
227 GPRTemporary result(this);
228 GPRReg resultGPR = result.gpr();
230 m_jit.move(TrustedImm32(0), resultGPR);
232 JITCompiler::JumpList done;
233 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
234 if (!isKnownNotCell(operand.node()))
235 done.append(m_jit.branchIfCell(JSValueRegs(argGPR)));
237 GPRTemporary localGlobalObject(this);
238 GPRTemporary remoteGlobalObject(this);
239 GPRTemporary scratch(this);
241 JITCompiler::Jump notCell;
242 if (!isKnownCell(operand.node()))
243 notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));
245 JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
247 JITCompiler::Address(argGPR, JSCell::typeInfoFlagsOffset()),
248 JITCompiler::TrustedImm32(MasqueradesAsUndefined));
249 done.append(isNotMasqueradesAsUndefined);
251 GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
252 GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
253 m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
254 m_jit.emitLoadStructure(argGPR, resultGPR, scratch.gpr());
255 m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
256 m_jit.comparePtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultGPR);
257 done.append(m_jit.jump());
258 if (!isKnownCell(operand.node()))
259 notCell.link(&m_jit);
262 if (!isKnownNotOther(operand.node())) {
263 m_jit.move(argGPR, resultGPR);
264 m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
265 m_jit.compare64(JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR);
270 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
271 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
274 void SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined(Edge operand, Node* branchNode)
276 ASSERT_WITH_MESSAGE(!masqueradesAsUndefinedWatchpointIsStillValid() || !isKnownCell(operand.node()), "The Compare should have been eliminated, it is known to be always false.");
278 BasicBlock* taken = branchNode->branchData()->taken.block;
279 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
281 JSValueOperand arg(this, operand, ManualOperandSpeculation);
282 GPRReg argGPR = arg.gpr();
284 GPRTemporary result(this, Reuse, arg);
285 GPRReg resultGPR = result.gpr();
287 // First, handle the case where "operand" is a cell.
288 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
289 if (!isKnownNotCell(operand.node())) {
290 JITCompiler::Jump isCell = m_jit.branchIfCell(JSValueRegs(argGPR));
291 addBranch(isCell, notTaken);
294 GPRTemporary localGlobalObject(this);
295 GPRTemporary remoteGlobalObject(this);
296 GPRTemporary scratch(this);
298 JITCompiler::Jump notCell;
299 if (!isKnownCell(operand.node()))
300 notCell = m_jit.branchIfNotCell(JSValueRegs(argGPR));
302 branchTest8(JITCompiler::Zero,
303 JITCompiler::Address(argGPR, JSCell::typeInfoFlagsOffset()),
304 JITCompiler::TrustedImm32(MasqueradesAsUndefined), notTaken);
306 GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
307 GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
308 m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
309 m_jit.emitLoadStructure(argGPR, resultGPR, scratch.gpr());
310 m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
311 branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, taken);
313 if (!isKnownCell(operand.node())) {
314 jump(notTaken, ForceJump);
315 notCell.link(&m_jit);
319 if (isKnownNotOther(operand.node()))
322 JITCompiler::RelationalCondition condition = JITCompiler::Equal;
323 if (taken == nextBlock()) {
324 condition = JITCompiler::NotEqual;
325 std::swap(taken, notTaken);
327 m_jit.move(argGPR, resultGPR);
328 m_jit.and64(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
329 branch64(condition, resultGPR, JITCompiler::TrustedImm64(ValueNull), taken);
334 void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
336 BasicBlock* taken = branchNode->branchData()->taken.block;
337 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
339 JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;
341 // The branch instruction will branch to the taken block.
342 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
343 if (taken == nextBlock()) {
344 cond = JITCompiler::invert(cond);
345 callResultCondition = JITCompiler::Zero;
346 BasicBlock* tmp = taken;
351 JSValueOperand arg1(this, node->child1());
352 JSValueOperand arg2(this, node->child2());
353 GPRReg arg1GPR = arg1.gpr();
354 GPRReg arg2GPR = arg2.gpr();
356 JITCompiler::JumpList slowPath;
358 if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
359 GPRFlushedCallResult result(this);
360 GPRReg resultGPR = result.gpr();
366 callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
367 m_jit.exceptionCheck();
369 branchTest32(callResultCondition, resultGPR, taken);
371 GPRTemporary result(this, Reuse, arg2);
372 GPRReg resultGPR = result.gpr();
377 if (!isKnownInteger(node->child1().node()))
378 slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
379 if (!isKnownInteger(node->child2().node()))
380 slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
382 branch32(cond, arg1GPR, arg2GPR, taken);
384 if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
385 jump(notTaken, ForceJump);
387 slowPath.link(&m_jit);
389 silentSpillAllRegisters(resultGPR);
390 callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
391 silentFillAllRegisters(resultGPR);
392 m_jit.exceptionCheck();
394 branchTest32(callResultCondition, resultGPR, taken);
400 m_indexInBlock = m_block->size() - 1;
401 m_currentNode = branchNode;
404 template<typename JumpType>
405 class CompareAndBoxBooleanSlowPathGenerator
406 : public CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg> {
408 CompareAndBoxBooleanSlowPathGenerator(
409 JumpType from, SpeculativeJIT* jit,
410 S_JITOperation_EJJ function, GPRReg result, GPRReg arg1, GPRReg arg2)
411 : CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg>(
412 from, jit, function, NeedToSpill, ExceptionCheckRequirement::CheckNeeded, result)
419 void generateInternal(SpeculativeJIT* jit) override
422 this->recordCall(jit->callOperation(this->m_function, this->m_result, m_arg1, m_arg2));
423 jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);
424 jit->m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), this->m_result);
433 void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
435 ASSERT(node->isBinaryUseKind(UntypedUse));
436 JSValueOperand arg1(this, node->child1());
437 JSValueOperand arg2(this, node->child2());
438 GPRReg arg1GPR = arg1.gpr();
439 GPRReg arg2GPR = arg2.gpr();
441 JITCompiler::JumpList slowPath;
443 if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
444 GPRFlushedCallResult result(this);
445 GPRReg resultGPR = result.gpr();
451 callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
452 m_jit.exceptionCheck();
454 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
455 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
457 GPRTemporary result(this, Reuse, arg2);
458 GPRReg resultGPR = result.gpr();
463 if (!isKnownInteger(node->child1().node()))
464 slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
465 if (!isKnownInteger(node->child2().node()))
466 slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
468 m_jit.compare32(cond, arg1GPR, arg2GPR, resultGPR);
469 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
471 if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
472 addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>>(
473 slowPath, this, helperFunction, resultGPR, arg1GPR, arg2GPR));
476 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
480 void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
482 BasicBlock* taken = branchNode->branchData()->taken.block;
483 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
485 // The branch instruction will branch to the taken block.
486 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
487 if (taken == nextBlock()) {
489 BasicBlock* tmp = taken;
494 JSValueOperand arg1(this, node->child1());
495 JSValueOperand arg2(this, node->child2());
496 GPRReg arg1GPR = arg1.gpr();
497 GPRReg arg2GPR = arg2.gpr();
499 GPRTemporary result(this);
500 GPRReg resultGPR = result.gpr();
505 if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
506 // see if we get lucky: if the arguments are cells and they reference the same
507 // cell, then they must be strictly equal.
508 branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
510 silentSpillAllRegisters(resultGPR);
511 callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
512 silentFillAllRegisters(resultGPR);
513 m_jit.exceptionCheck();
515 branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
517 m_jit.or64(arg1GPR, arg2GPR, resultGPR);
519 JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
521 JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
522 JITCompiler::Jump leftDouble = m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister);
524 JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
525 JITCompiler::Jump rightDouble = m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister);
526 rightOK.link(&m_jit);
528 branch64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, taken);
529 jump(notTaken, ForceJump);
531 twoCellsCase.link(&m_jit);
532 branch64(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
534 leftDouble.link(&m_jit);
535 rightDouble.link(&m_jit);
537 silentSpillAllRegisters(resultGPR);
538 callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR);
539 silentFillAllRegisters(resultGPR);
540 m_jit.exceptionCheck();
542 branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
548 void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
550 JSValueOperand arg1(this, node->child1());
551 JSValueOperand arg2(this, node->child2());
552 GPRReg arg1GPR = arg1.gpr();
553 GPRReg arg2GPR = arg2.gpr();
555 GPRTemporary result(this);
556 GPRReg resultGPR = result.gpr();
561 if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
562 // see if we get lucky: if the arguments are cells and they reference the same
563 // cell, then they must be strictly equal.
564 // FIXME: this should flush registers instead of silent spill/fill.
565 JITCompiler::Jump notEqualCase = m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR);
567 m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
569 JITCompiler::Jump done = m_jit.jump();
571 notEqualCase.link(&m_jit);
573 silentSpillAllRegisters(resultGPR);
574 callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
575 silentFillAllRegisters(resultGPR);
576 m_jit.exceptionCheck();
578 m_jit.and64(JITCompiler::TrustedImm32(1), resultGPR);
579 m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
583 m_jit.or64(arg1GPR, arg2GPR, resultGPR);
585 JITCompiler::JumpList slowPathCases;
587 JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
589 JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
590 slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister));
592 JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
593 slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister));
594 rightOK.link(&m_jit);
596 m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR);
597 m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
599 JITCompiler::Jump done = m_jit.jump();
601 twoCellsCase.link(&m_jit);
602 slowPathCases.append(m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR));
604 m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
606 addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<MacroAssembler::JumpList>>(
607 slowPathCases, this, operationCompareStrictEq, resultGPR, arg1GPR,
613 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
616 void SpeculativeJIT::compileMiscStrictEq(Node* node)
618 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
619 JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
620 GPRTemporary result(this);
622 if (node->child1().useKind() == MiscUse)
623 speculateMisc(node->child1(), op1.jsValueRegs());
624 if (node->child2().useKind() == MiscUse)
625 speculateMisc(node->child2(), op2.jsValueRegs());
627 m_jit.compare64(JITCompiler::Equal, op1.gpr(), op2.gpr(), result.gpr());
628 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
629 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
632 void SpeculativeJIT::emitCall(Node* node)
634 CallLinkInfo::CallType callType;
635 bool isVarargs = false;
636 bool isForwardVarargs = false;
638 bool isEmulatedTail = false;
639 bool isDirect = false;
640 switch (node->op()) {
643 callType = CallLinkInfo::Call;
646 callType = CallLinkInfo::TailCall;
649 case TailCallInlinedCaller:
650 callType = CallLinkInfo::Call;
651 isEmulatedTail = true;
654 callType = CallLinkInfo::Construct;
657 callType = CallLinkInfo::CallVarargs;
660 case TailCallVarargs:
661 callType = CallLinkInfo::TailCallVarargs;
665 case TailCallVarargsInlinedCaller:
666 callType = CallLinkInfo::CallVarargs;
668 isEmulatedTail = true;
670 case ConstructVarargs:
671 callType = CallLinkInfo::ConstructVarargs;
674 case CallForwardVarargs:
675 callType = CallLinkInfo::CallVarargs;
676 isForwardVarargs = true;
678 case ConstructForwardVarargs:
679 callType = CallLinkInfo::ConstructVarargs;
680 isForwardVarargs = true;
682 case TailCallForwardVarargs:
683 callType = CallLinkInfo::TailCallVarargs;
685 isForwardVarargs = true;
687 case TailCallForwardVarargsInlinedCaller:
688 callType = CallLinkInfo::CallVarargs;
689 isEmulatedTail = true;
690 isForwardVarargs = true;
693 callType = CallLinkInfo::DirectCall;
696 case DirectConstruct:
697 callType = CallLinkInfo::DirectConstruct;
701 callType = CallLinkInfo::DirectTailCall;
705 case DirectTailCallInlinedCaller:
706 callType = CallLinkInfo::DirectCall;
707 isEmulatedTail = true;
711 DFG_CRASH(m_jit.graph(), node, "bad node type");
715 GPRReg calleeGPR = InvalidGPRReg;
716 CallFrameShuffleData shuffleData;
718 ExecutableBase* executable = nullptr;
719 FunctionExecutable* functionExecutable = nullptr;
721 executable = node->castOperand<ExecutableBase*>();
722 functionExecutable = jsDynamicCast<FunctionExecutable*>(executable);
725 unsigned numPassedArgs = 0;
726 unsigned numAllocatedArgs = 0;
728 // Gotta load the arguments somehow. Varargs is trickier.
729 if (isVarargs || isForwardVarargs) {
730 RELEASE_ASSERT(!isDirect);
731 CallVarargsData* data = node->callVarargsData();
734 unsigned numUsedStackSlots = m_jit.graph().m_nextMachineLocal;
736 if (isForwardVarargs) {
745 scratchGPR1 = JITCompiler::selectScratchGPR();
746 scratchGPR2 = JITCompiler::selectScratchGPR(scratchGPR1);
747 scratchGPR3 = JITCompiler::selectScratchGPR(scratchGPR1, scratchGPR2);
749 m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR2);
750 JITCompiler::JumpList slowCase;
751 InlineCallFrame* inlineCallFrame;
753 inlineCallFrame = node->child3()->origin.semantic.inlineCallFrame;
755 inlineCallFrame = node->origin.semantic.inlineCallFrame;
756 emitSetupVarargsFrameFastCase(m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, inlineCallFrame, data->firstVarArgOffset, slowCase);
757 JITCompiler::Jump done = m_jit.jump();
758 slowCase.link(&m_jit);
759 callOperation(operationThrowStackOverflowForVarargs);
760 m_jit.exceptionCheck();
761 m_jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
763 resultGPR = scratchGPR2;
770 auto loadArgumentsGPR = [&] (GPRReg reservedGPR) {
771 if (reservedGPR != InvalidGPRReg)
773 JSValueOperand arguments(this, node->child3());
774 argumentsGPR = arguments.gpr();
775 if (reservedGPR != InvalidGPRReg)
779 scratchGPR1 = JITCompiler::selectScratchGPR(argumentsGPR, reservedGPR);
780 scratchGPR2 = JITCompiler::selectScratchGPR(argumentsGPR, scratchGPR1, reservedGPR);
781 scratchGPR3 = JITCompiler::selectScratchGPR(argumentsGPR, scratchGPR1, scratchGPR2, reservedGPR);
784 loadArgumentsGPR(InvalidGPRReg);
786 DFG_ASSERT(m_jit.graph(), node, isFlushed());
788 // Right now, arguments is in argumentsGPR and the register file is flushed.
789 callOperation(operationSizeFrameForVarargs, GPRInfo::returnValueGPR, argumentsGPR, numUsedStackSlots, data->firstVarArgOffset);
790 m_jit.exceptionCheck();
792 // Now we have the argument count of the callee frame, but we've lost the arguments operand.
793 // Reconstruct the arguments operand while preserving the callee frame.
794 loadArgumentsGPR(GPRInfo::returnValueGPR);
795 m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR1);
796 emitSetVarargsFrame(m_jit, GPRInfo::returnValueGPR, false, scratchGPR1, scratchGPR1);
797 m_jit.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), scratchGPR1, JITCompiler::stackPointerRegister);
799 callOperation(operationSetupVarargsFrame, GPRInfo::returnValueGPR, scratchGPR1, argumentsGPR, data->firstVarArgOffset, GPRInfo::returnValueGPR);
800 m_jit.exceptionCheck();
801 resultGPR = GPRInfo::returnValueGPR;
804 m_jit.addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), resultGPR, JITCompiler::stackPointerRegister);
806 DFG_ASSERT(m_jit.graph(), node, isFlushed());
808 // We don't need the arguments array anymore.
812 // Now set up the "this" argument.
813 JSValueOperand thisArgument(this, node->child2());
814 GPRReg thisArgumentGPR = thisArgument.gpr();
817 m_jit.store64(thisArgumentGPR, JITCompiler::calleeArgumentSlot(0));
819 // The call instruction's first child is the function; the subsequent children are the
821 numPassedArgs = node->numChildren() - 1;
822 numAllocatedArgs = numPassedArgs;
824 if (functionExecutable) {
825 // Allocate more args if this would let us avoid arity checks. This is throttled by
826 // CallLinkInfo's limit. It's probably good to throttle it - if the callee wants a
827 // ginormous amount of argument space then it's better for them to do it so that when we
828 // make calls to other things, we don't waste space.
829 unsigned desiredNumAllocatedArgs = static_cast<unsigned>(functionExecutable->parameterCount()) + 1;
830 if (desiredNumAllocatedArgs <= Options::maximumDirectCallStackSize()) {
831 numAllocatedArgs = std::max(numAllocatedArgs, desiredNumAllocatedArgs);
833 // Whoever converts to DirectCall should do this adjustment. It's too late for us to
834 // do this adjustment now since we will have already emitted code that relied on the
835 // value of m_parameterSlots.
838 Graph::parameterSlotsForArgCount(numAllocatedArgs)
839 <= m_jit.graph().m_parameterSlots);
844 Edge calleeEdge = m_jit.graph().child(node, 0);
845 JSValueOperand callee(this, calleeEdge);
846 calleeGPR = callee.gpr();
850 shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
851 shuffleData.numLocals = m_jit.graph().frameRegisterCount();
852 shuffleData.callee = ValueRecovery::inGPR(calleeGPR, DataFormatJS);
853 shuffleData.args.resize(numAllocatedArgs);
855 for (unsigned i = 0; i < numPassedArgs; ++i) {
856 Edge argEdge = m_jit.graph().varArgChild(node, i + 1);
857 GenerationInfo& info = generationInfo(argEdge.node());
860 shuffleData.args[i] = info.recovery(argEdge->virtualRegister());
863 for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
864 shuffleData.args[i] = ValueRecovery::constant(jsUndefined());
866 shuffleData.setupCalleeSaveRegisters(m_jit.codeBlock());
868 m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), JITCompiler::calleeFramePayloadSlot(CallFrameSlot::argumentCount));
870 for (unsigned i = 0; i < numPassedArgs; i++) {
871 Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
872 JSValueOperand arg(this, argEdge);
873 GPRReg argGPR = arg.gpr();
876 m_jit.store64(argGPR, JITCompiler::calleeArgumentSlot(i));
879 for (unsigned i = numPassedArgs; i < numAllocatedArgs; ++i)
880 m_jit.storeTrustedValue(jsUndefined(), JITCompiler::calleeArgumentSlot(i));
884 if (!isTail || isVarargs || isForwardVarargs) {
885 Edge calleeEdge = m_jit.graph().child(node, 0);
886 JSValueOperand callee(this, calleeEdge);
887 calleeGPR = callee.gpr();
889 m_jit.store64(calleeGPR, JITCompiler::calleeFrameSlot(CallFrameSlot::callee));
894 CodeOrigin staticOrigin = node->origin.semantic;
895 ASSERT(!isTail || !staticOrigin.inlineCallFrame || !staticOrigin.inlineCallFrame->getCallerSkippingTailCalls());
896 ASSERT(!isEmulatedTail || (staticOrigin.inlineCallFrame && staticOrigin.inlineCallFrame->getCallerSkippingTailCalls()));
897 CodeOrigin dynamicOrigin =
898 isEmulatedTail ? *staticOrigin.inlineCallFrame->getCallerSkippingTailCalls() : staticOrigin;
900 CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(dynamicOrigin, m_stream->size());
902 auto setResultAndResetStack = [&] () {
903 GPRFlushedCallResult result(this);
904 GPRReg resultGPR = result.gpr();
905 m_jit.move(GPRInfo::returnValueGPR, resultGPR);
907 jsValueResult(resultGPR, m_currentNode, DataFormatJS, UseChildrenCalledExplicitly);
909 // After the calls are done, we need to reestablish our stack
910 // pointer. We rely on this for varargs calls, calls with arity
911 // mismatch (the callframe is slided) and tail calls.
912 m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister);
915 CallLinkInfo* callLinkInfo = m_jit.codeBlock()->addCallLinkInfo();
916 callLinkInfo->setUpCall(callType, m_currentNode->origin.semantic, calleeGPR);
918 if (node->op() == CallEval) {
919 // We want to call operationCallEval but we don't want to overwrite the parameter area in
920 // which we have created a prototypical eval call frame. This means that we have to
921 // subtract stack to make room for the call. Lucky for us, at this point we have the whole
922 // register file to ourselves.
924 m_jit.emitStoreCallSiteIndex(callSite);
925 m_jit.addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), JITCompiler::stackPointerRegister, GPRInfo::regT0);
926 m_jit.storePtr(GPRInfo::callFrameRegister, JITCompiler::Address(GPRInfo::regT0, CallFrame::callerFrameOffset()));
928 // Now we need to make room for:
929 // - The caller frame and PC of a call to operationCallEval.
930 // - Potentially two arguments on the stack.
931 unsigned requiredBytes = sizeof(CallerFrameAndPC) + sizeof(ExecState*) * 2;
932 requiredBytes = WTF::roundUpToMultipleOf(stackAlignmentBytes(), requiredBytes);
933 m_jit.subPtr(TrustedImm32(requiredBytes), JITCompiler::stackPointerRegister);
934 m_jit.setupArgumentsWithExecState(GPRInfo::regT0);
935 prepareForExternalCall();
936 m_jit.appendCall(operationCallEval);
937 m_jit.exceptionCheck();
938 JITCompiler::Jump done = m_jit.branchTest64(JITCompiler::NonZero, GPRInfo::returnValueGPR);
940 // This is the part where we meant to make a normal call. Oops.
941 m_jit.addPtr(TrustedImm32(requiredBytes), JITCompiler::stackPointerRegister);
942 m_jit.load64(JITCompiler::calleeFrameSlot(CallFrameSlot::callee), GPRInfo::regT0);
943 m_jit.emitDumbVirtualCall(callLinkInfo);
946 setResultAndResetStack();
951 callLinkInfo->setExecutableDuringCompilation(executable);
952 callLinkInfo->setMaxNumArguments(numAllocatedArgs);
955 RELEASE_ASSERT(node->op() == DirectTailCall);
957 JITCompiler::PatchableJump patchableJump = m_jit.patchableJump();
958 JITCompiler::Label mainPath = m_jit.label();
960 m_jit.emitStoreCallSiteIndex(callSite);
962 callLinkInfo->setFrameShuffleData(shuffleData);
963 CallFrameShuffler(m_jit, shuffleData).prepareForTailCall();
965 JITCompiler::Call call = m_jit.nearTailCall();
967 JITCompiler::Label slowPath = m_jit.label();
968 patchableJump.m_jump.linkTo(slowPath, &m_jit);
970 silentSpillAllRegisters(InvalidGPRReg);
971 callOperation(operationLinkDirectCall, callLinkInfo, calleeGPR);
972 silentFillAllRegisters(InvalidGPRReg);
973 m_jit.exceptionCheck();
974 m_jit.jump().linkTo(mainPath, &m_jit);
978 m_jit.addJSDirectTailCall(patchableJump, call, slowPath, callLinkInfo);
982 JITCompiler::Label mainPath = m_jit.label();
984 m_jit.emitStoreCallSiteIndex(callSite);
986 JITCompiler::Call call = m_jit.nearCall();
987 JITCompiler::Jump done = m_jit.jump();
989 JITCompiler::Label slowPath = m_jit.label();
991 m_jit.pop(JITCompiler::selectScratchGPR(calleeGPR));
993 callOperation(operationLinkDirectCall, callLinkInfo, calleeGPR);
994 m_jit.exceptionCheck();
995 m_jit.jump().linkTo(mainPath, &m_jit);
999 setResultAndResetStack();
1001 m_jit.addJSDirectCall(call, slowPath, callLinkInfo);
1005 m_jit.emitStoreCallSiteIndex(callSite);
1007 JITCompiler::DataLabelPtr targetToCheck;
1008 JITCompiler::Jump slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0));
1011 if (node->op() == TailCall) {
1012 callLinkInfo->setFrameShuffleData(shuffleData);
1013 CallFrameShuffler(m_jit, shuffleData).prepareForTailCall();
1015 m_jit.emitRestoreCalleeSaves();
1016 m_jit.prepareForTailCallSlow();
1020 JITCompiler::Call fastCall = isTail ? m_jit.nearTailCall() : m_jit.nearCall();
1022 JITCompiler::Jump done = m_jit.jump();
1024 slowPath.link(&m_jit);
1026 if (node->op() == TailCall) {
1027 CallFrameShuffler callFrameShuffler(m_jit, shuffleData);
1028 callFrameShuffler.setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
1029 callFrameShuffler.prepareForSlowPath();
1031 m_jit.move(calleeGPR, GPRInfo::regT0); // Callee needs to be in regT0
1034 m_jit.emitRestoreCalleeSaves(); // This needs to happen after we moved calleeGPR to regT0
1037 m_jit.move(MacroAssembler::TrustedImmPtr(callLinkInfo), GPRInfo::regT2); // Link info needs to be in regT2
1038 JITCompiler::Call slowCall = m_jit.nearCall();
1043 m_jit.abortWithReason(JITDidReturnFromTailCall);
1045 setResultAndResetStack();
1047 m_jit.addJSCall(fastCall, slowCall, targetToCheck, callLinkInfo);
1050 // Clang should allow unreachable [[clang::fallthrough]] in template functions if any template expansion uses it
1051 // http://llvm.org/bugs/show_bug.cgi?id=18619
1052 #if COMPILER(CLANG) && defined(__has_warning)
1053 #pragma clang diagnostic push
1054 #if __has_warning("-Wimplicit-fallthrough")
1055 #pragma clang diagnostic ignored "-Wimplicit-fallthrough"
1058 template<bool strict>
1059 GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnFormat)
1061 AbstractValue& value = m_state.forNode(edge);
1062 SpeculatedType type = value.m_type;
1063 ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32Only));
1065 m_interpreter.filter(value, SpecInt32Only);
1066 if (value.isClear()) {
1067 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1068 returnFormat = DataFormatInt32;
1072 VirtualRegister virtualRegister = edge->virtualRegister();
1073 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1075 switch (info.registerFormat()) {
1076 case DataFormatNone: {
1077 GPRReg gpr = allocate();
1079 if (edge->hasConstant()) {
1080 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
1081 ASSERT(edge->isInt32Constant());
1082 m_jit.move(MacroAssembler::Imm32(edge->asInt32()), gpr);
1083 info.fillInt32(*m_stream, gpr);
1084 returnFormat = DataFormatInt32;
1088 DataFormat spillFormat = info.spillFormat();
1090 DFG_ASSERT(m_jit.graph(), m_currentNode, (spillFormat & DataFormatJS) || spillFormat == DataFormatInt32);
1092 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
1094 if (spillFormat == DataFormatJSInt32 || spillFormat == DataFormatInt32) {
1095 // If we know this was spilled as an integer we can fill without checking.
1097 m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
1098 info.fillInt32(*m_stream, gpr);
1099 returnFormat = DataFormatInt32;
1102 if (spillFormat == DataFormatInt32) {
1103 m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr);
1104 info.fillInt32(*m_stream, gpr);
1105 returnFormat = DataFormatInt32;
1107 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
1108 info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
1109 returnFormat = DataFormatJSInt32;
1113 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
1115 // Fill as JSValue, and fall through.
1116 info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
1121 case DataFormatJS: {
1122 DFG_ASSERT(m_jit.graph(), m_currentNode, !(type & SpecInt52Only));
1123 // Check the value is an integer.
1124 GPRReg gpr = info.gpr();
1126 if (type & ~SpecInt32Only)
1127 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branch64(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister));
1128 info.fillJSValue(*m_stream, gpr, DataFormatJSInt32);
1129 // If !strict we're done, return.
1131 returnFormat = DataFormatJSInt32;
1134 // else fall through & handle as DataFormatJSInt32.
1139 case DataFormatJSInt32: {
1140 // In a strict fill we need to strip off the value tag.
1142 GPRReg gpr = info.gpr();
1144 // If the register has already been locked we need to take a copy.
1145 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInt32, not DataFormatJSInt32.
1146 if (m_gprs.isLocked(gpr))
1147 result = allocate();
1150 info.fillInt32(*m_stream, gpr);
1153 m_jit.zeroExtend32ToPtr(gpr, result);
1154 returnFormat = DataFormatInt32;
1158 GPRReg gpr = info.gpr();
1160 returnFormat = DataFormatJSInt32;
1164 case DataFormatInt32: {
1165 GPRReg gpr = info.gpr();
1167 returnFormat = DataFormatInt32;
1171 case DataFormatJSDouble:
1172 case DataFormatCell:
1173 case DataFormatBoolean:
1174 case DataFormatJSCell:
1175 case DataFormatJSBoolean:
1176 case DataFormatDouble:
1177 case DataFormatStorage:
1178 case DataFormatInt52:
1179 case DataFormatStrictInt52:
1180 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
1183 DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
1184 return InvalidGPRReg;
1187 #if COMPILER(CLANG) && defined(__has_warning)
1188 #pragma clang diagnostic pop
1191 GPRReg SpeculativeJIT::fillSpeculateInt32(Edge edge, DataFormat& returnFormat)
1193 return fillSpeculateInt32Internal<false>(edge, returnFormat);
1196 GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge)
1198 DataFormat mustBeDataFormatInt32;
1199 GPRReg result = fillSpeculateInt32Internal<true>(edge, mustBeDataFormatInt32);
1200 DFG_ASSERT(m_jit.graph(), m_currentNode, mustBeDataFormatInt32 == DataFormatInt32);
1204 GPRReg SpeculativeJIT::fillSpeculateInt52(Edge edge, DataFormat desiredFormat)
1206 ASSERT(desiredFormat == DataFormatInt52 || desiredFormat == DataFormatStrictInt52);
1207 AbstractValue& value = m_state.forNode(edge);
1209 m_interpreter.filter(value, SpecAnyInt);
1210 if (value.isClear()) {
1211 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1215 VirtualRegister virtualRegister = edge->virtualRegister();
1216 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1218 switch (info.registerFormat()) {
1219 case DataFormatNone: {
1220 GPRReg gpr = allocate();
1222 if (edge->hasConstant()) {
1223 JSValue jsValue = edge->asJSValue();
1224 ASSERT(jsValue.isAnyInt());
1225 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
1226 int64_t value = jsValue.asAnyInt();
1227 if (desiredFormat == DataFormatInt52)
1228 value = value << JSValue::int52ShiftAmount;
1229 m_jit.move(MacroAssembler::Imm64(value), gpr);
1230 info.fillGPR(*m_stream, gpr, desiredFormat);
1234 DataFormat spillFormat = info.spillFormat();
1236 DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat == DataFormatInt52 || spillFormat == DataFormatStrictInt52);
1238 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
1240 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
1241 if (desiredFormat == DataFormatStrictInt52) {
1242 if (spillFormat == DataFormatInt52)
1243 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
1244 info.fillStrictInt52(*m_stream, gpr);
1247 if (spillFormat == DataFormatStrictInt52)
1248 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
1249 info.fillInt52(*m_stream, gpr);
1253 case DataFormatStrictInt52: {
1254 GPRReg gpr = info.gpr();
1255 bool wasLocked = m_gprs.isLocked(gpr);
1257 if (desiredFormat == DataFormatStrictInt52)
1260 GPRReg result = allocate();
1261 m_jit.move(gpr, result);
1265 info.fillInt52(*m_stream, gpr);
1266 m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
1270 case DataFormatInt52: {
1271 GPRReg gpr = info.gpr();
1272 bool wasLocked = m_gprs.isLocked(gpr);
1274 if (desiredFormat == DataFormatInt52)
1277 GPRReg result = allocate();
1278 m_jit.move(gpr, result);
1282 info.fillStrictInt52(*m_stream, gpr);
1283 m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), gpr);
1288 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
1289 return InvalidGPRReg;
1293 FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
1295 ASSERT(edge.useKind() == DoubleRepUse || edge.useKind() == DoubleRepRealUse || edge.useKind() == DoubleRepAnyIntUse);
1296 ASSERT(edge->hasDoubleResult());
1297 VirtualRegister virtualRegister = edge->virtualRegister();
1298 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1300 if (info.registerFormat() == DataFormatNone) {
1301 if (edge->hasConstant()) {
1302 if (edge->isNumberConstant()) {
1303 FPRReg fpr = fprAllocate();
1304 int64_t doubleAsInt = reinterpretDoubleToInt64(edge->asNumber());
1306 m_jit.moveZeroToDouble(fpr);
1308 GPRReg gpr = allocate();
1309 m_jit.move(MacroAssembler::Imm64(doubleAsInt), gpr);
1310 m_jit.move64ToDouble(gpr, fpr);
1314 m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
1315 info.fillDouble(*m_stream, fpr);
1318 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1319 return fprAllocate();
1322 DataFormat spillFormat = info.spillFormat();
1323 if (spillFormat != DataFormatDouble) {
1325 m_jit.graph(), m_currentNode, toCString(
1326 "Expected ", edge, " to have double format but instead it is spilled as ",
1327 dataFormatToString(spillFormat)).data());
1329 DFG_ASSERT(m_jit.graph(), m_currentNode, spillFormat == DataFormatDouble);
1330 FPRReg fpr = fprAllocate();
1331 m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
1332 m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
1333 info.fillDouble(*m_stream, fpr);
1337 DFG_ASSERT(m_jit.graph(), m_currentNode, info.registerFormat() == DataFormatDouble);
1338 FPRReg fpr = info.fpr();
1343 GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
1345 AbstractValue& value = m_state.forNode(edge);
1346 SpeculatedType type = value.m_type;
1347 ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell));
1349 m_interpreter.filter(value, SpecCell);
1350 if (value.isClear()) {
1351 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1355 VirtualRegister virtualRegister = edge->virtualRegister();
1356 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1358 switch (info.registerFormat()) {
1359 case DataFormatNone: {
1360 GPRReg gpr = allocate();
1362 if (edge->hasConstant()) {
1363 JSValue jsValue = edge->asJSValue();
1364 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
1365 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
1366 info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
1370 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
1371 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
1373 info.fillJSValue(*m_stream, gpr, DataFormatJS);
1374 if (type & ~SpecCell)
1375 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchIfNotCell(JSValueRegs(gpr)));
1376 info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
1380 case DataFormatCell:
1381 case DataFormatJSCell: {
1382 GPRReg gpr = info.gpr();
1384 if (!ASSERT_DISABLED) {
1385 MacroAssembler::Jump checkCell = m_jit.branchIfCell(JSValueRegs(gpr));
1386 m_jit.abortWithReason(DFGIsNotCell);
1387 checkCell.link(&m_jit);
1392 case DataFormatJS: {
1393 GPRReg gpr = info.gpr();
1395 if (type & ~SpecCell)
1396 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchIfNotCell(JSValueRegs(gpr)));
1397 info.fillJSValue(*m_stream, gpr, DataFormatJSCell);
1401 case DataFormatJSInt32:
1402 case DataFormatInt32:
1403 case DataFormatJSDouble:
1404 case DataFormatJSBoolean:
1405 case DataFormatBoolean:
1406 case DataFormatDouble:
1407 case DataFormatStorage:
1408 case DataFormatInt52:
1409 case DataFormatStrictInt52:
1410 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
1413 DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
1414 return InvalidGPRReg;
1418 GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
1420 AbstractValue& value = m_state.forNode(edge);
1421 SpeculatedType type = value.m_type;
1422 ASSERT(edge.useKind() != KnownBooleanUse || !(value.m_type & ~SpecBoolean));
1424 m_interpreter.filter(value, SpecBoolean);
1425 if (value.isClear()) {
1426 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
1430 VirtualRegister virtualRegister = edge->virtualRegister();
1431 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
1433 switch (info.registerFormat()) {
1434 case DataFormatNone: {
1435 GPRReg gpr = allocate();
1437 if (edge->hasConstant()) {
1438 JSValue jsValue = edge->asJSValue();
1439 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
1440 m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsValue)), gpr);
1441 info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
1444 DFG_ASSERT(m_jit.graph(), m_currentNode, info.spillFormat() & DataFormatJS);
1445 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
1446 m_jit.load64(JITCompiler::addressFor(virtualRegister), gpr);
1448 info.fillJSValue(*m_stream, gpr, DataFormatJS);
1449 if (type & ~SpecBoolean) {
1450 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
1451 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
1452 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
1454 info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
1458 case DataFormatBoolean:
1459 case DataFormatJSBoolean: {
1460 GPRReg gpr = info.gpr();
1465 case DataFormatJS: {
1466 GPRReg gpr = info.gpr();
1468 if (type & ~SpecBoolean) {
1469 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
1470 speculationCheck(BadType, JSValueRegs(gpr), edge, m_jit.branchTest64(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg));
1471 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr);
1473 info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean);
1477 case DataFormatJSInt32:
1478 case DataFormatInt32:
1479 case DataFormatJSDouble:
1480 case DataFormatJSCell:
1481 case DataFormatCell:
1482 case DataFormatDouble:
1483 case DataFormatStorage:
1484 case DataFormatInt52:
1485 case DataFormatStrictInt52:
1486 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad data format");
1489 DFG_CRASH(m_jit.graph(), m_currentNode, "Corrupt data format");
1490 return InvalidGPRReg;
1494 void SpeculativeJIT::compileObjectEquality(Node* node)
1496 SpeculateCellOperand op1(this, node->child1());
1497 SpeculateCellOperand op2(this, node->child2());
1498 GPRTemporary result(this, Reuse, op1);
1500 GPRReg op1GPR = op1.gpr();
1501 GPRReg op2GPR = op2.gpr();
1502 GPRReg resultGPR = result.gpr();
1504 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1506 JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
1508 JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
1511 JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
1512 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1514 MacroAssembler::NonZero,
1515 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1516 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1519 JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
1520 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1522 MacroAssembler::NonZero,
1523 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1524 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1527 m_jit.compare64(MacroAssembler::Equal, op1GPR, op2GPR, resultGPR);
1528 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
1529 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
1532 void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild, Edge otherChild)
1534 SpeculateCellOperand op1(this, objectChild);
1535 JSValueOperand op2(this, otherChild);
1536 GPRTemporary result(this);
1538 GPRReg op1GPR = op1.gpr();
1539 GPRReg op2GPR = op2.gpr();
1540 GPRReg resultGPR = result.gpr();
1542 DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1544 // At this point we know that we can perform a straight-forward equality comparison on pointer
1545 // values because we are doing strict equality.
1546 m_jit.compare64(MacroAssembler::Equal, op1GPR, op2GPR, resultGPR);
1547 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
1548 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
1551 void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode)
1553 BasicBlock* taken = branchNode->branchData()->taken.block;
1554 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1556 SpeculateCellOperand op1(this, objectChild);
1557 JSValueOperand op2(this, otherChild);
1559 GPRReg op1GPR = op1.gpr();
1560 GPRReg op2GPR = op2.gpr();
1562 DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1564 if (taken == nextBlock()) {
1565 branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR, notTaken);
1568 branchPtr(MacroAssembler::Equal, op1GPR, op2GPR, taken);
1573 void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild)
1575 SpeculateCellOperand op1(this, leftChild);
1576 JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
1577 GPRTemporary result(this);
1579 GPRReg op1GPR = op1.gpr();
1580 GPRReg op2GPR = op2.gpr();
1581 GPRReg resultGPR = result.gpr();
1583 bool masqueradesAsUndefinedWatchpointValid =
1584 masqueradesAsUndefinedWatchpointIsStillValid();
1586 if (masqueradesAsUndefinedWatchpointValid) {
1588 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1591 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1592 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
1594 MacroAssembler::NonZero,
1595 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1596 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1599 // It seems that most of the time when programs do a == b where b may be either null/undefined
1600 // or an object, b is usually an object. Balance the branches to make that case fast.
1601 MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(JSValueRegs(op2GPR));
1603 // We know that within this branch, rightChild must be a cell.
1604 if (masqueradesAsUndefinedWatchpointValid) {
1606 JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));
1609 JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));
1610 speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
1612 MacroAssembler::NonZero,
1613 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1614 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1617 // At this point we know that we can perform a straight-forward equality comparison on pointer
1618 // values because both left and right are pointers to objects that have no special equality
1620 m_jit.compare64(MacroAssembler::Equal, op1GPR, op2GPR, resultGPR);
1621 MacroAssembler::Jump done = m_jit.jump();
1623 rightNotCell.link(&m_jit);
1625 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1626 // prove that it is either null or undefined.
1627 if (needsTypeCheck(rightChild, SpecCell | SpecOther)) {
1628 m_jit.move(op2GPR, resultGPR);
1629 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
1632 JSValueRegs(op2GPR), rightChild, SpecCell | SpecOther,
1634 MacroAssembler::NotEqual, resultGPR,
1635 MacroAssembler::TrustedImm64(ValueNull)));
1637 m_jit.move(TrustedImm32(0), result.gpr());
1640 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
1641 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
1644 void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode)
1646 BasicBlock* taken = branchNode->branchData()->taken.block;
1647 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1649 SpeculateCellOperand op1(this, leftChild);
1650 JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
1651 GPRTemporary result(this);
1653 GPRReg op1GPR = op1.gpr();
1654 GPRReg op2GPR = op2.gpr();
1655 GPRReg resultGPR = result.gpr();
1657 bool masqueradesAsUndefinedWatchpointValid =
1658 masqueradesAsUndefinedWatchpointIsStillValid();
1660 if (masqueradesAsUndefinedWatchpointValid) {
1662 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1665 JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
1666 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
1668 MacroAssembler::NonZero,
1669 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1670 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1673 // It seems that most of the time when programs do a == b where b may be either null/undefined
1674 // or an object, b is usually an object. Balance the branches to make that case fast.
1675 MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(JSValueRegs(op2GPR));
1677 // We know that within this branch, rightChild must be a cell.
1678 if (masqueradesAsUndefinedWatchpointValid) {
1680 JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));
1683 JSValueRegs(op2GPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2GPR));
1684 speculationCheck(BadType, JSValueRegs(op2GPR), rightChild,
1686 MacroAssembler::NonZero,
1687 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1688 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1691 // At this point we know that we can perform a straight-forward equality comparison on pointer
1692 // values because both left and right are pointers to objects that have no special equality
1694 branch64(MacroAssembler::Equal, op1GPR, op2GPR, taken);
1696 // We know that within this branch, rightChild must not be a cell. Check if that is enough to
1697 // prove that it is either null or undefined.
1698 if (!needsTypeCheck(rightChild, SpecCell | SpecOther))
1699 rightNotCell.link(&m_jit);
1701 jump(notTaken, ForceJump);
1703 rightNotCell.link(&m_jit);
1704 m_jit.move(op2GPR, resultGPR);
1705 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
1708 JSValueRegs(op2GPR), rightChild, SpecCell | SpecOther, m_jit.branch64(
1709 MacroAssembler::NotEqual, resultGPR,
1710 MacroAssembler::TrustedImm64(ValueNull)));
1716 void SpeculativeJIT::compileSymbolUntypedEquality(Node* node, Edge symbolEdge, Edge untypedEdge)
1718 SpeculateCellOperand symbol(this, symbolEdge);
1719 JSValueOperand untyped(this, untypedEdge);
1720 GPRTemporary result(this, Reuse, symbol, untyped);
1722 GPRReg symbolGPR = symbol.gpr();
1723 GPRReg untypedGPR = untyped.gpr();
1724 GPRReg resultGPR = result.gpr();
1726 speculateSymbol(symbolEdge, symbolGPR);
1728 // At this point we know that we can perform a straight-forward equality comparison on pointer
1729 // values because we are doing strict equality.
1730 m_jit.compare64(MacroAssembler::Equal, symbolGPR, untypedGPR, resultGPR);
1731 unblessedBooleanResult(resultGPR, node);
1734 void SpeculativeJIT::compileInt32Compare(Node* node, MacroAssembler::RelationalCondition condition)
1736 if (node->child1()->isInt32Constant()) {
1737 SpeculateInt32Operand op2(this, node->child2());
1738 GPRTemporary result(this, Reuse, op2);
1739 int32_t imm = node->child1()->asInt32();
1740 m_jit.compare32(condition, JITCompiler::Imm32(imm), op2.gpr(), result.gpr());
1742 // If we add a DataFormatBool, we should use it here.
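// compare32 leaves 0 or 1 in the result register; OR-ing in ValueFalse turns that into the
// boxed JS boolean (ValueTrue == ValueFalse | 1), which is what DataFormatJSBoolean expects.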
1743 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1744 jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
1745 } else if (node->child2()->isInt32Constant()) {
1746 SpeculateInt32Operand op1(this, node->child1());
1747 GPRTemporary result(this, Reuse, op1);
1748 int32_t imm = node->child2()->asInt32();
1749 m_jit.compare32(condition, op1.gpr(), JITCompiler::Imm32(imm), result.gpr());
1751 // If we add a DataFormatBool, we should use it here.
1752 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1753 jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
1755 SpeculateInt32Operand op1(this, node->child1());
1756 SpeculateInt32Operand op2(this, node->child2());
1757 GPRTemporary result(this, Reuse, op1, op2);
1758 m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
1760 // If we add a DataFormatBool, we should use it here.
1761 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1762 jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
1766 void SpeculativeJIT::compileInt52Compare(Node* node, MacroAssembler::RelationalCondition condition)
1768 SpeculateWhicheverInt52Operand op1(this, node->child1());
1769 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
1770 GPRTemporary result(this, Reuse, op1, op2);
1772 m_jit.compare64(condition, op1.gpr(), op2.gpr(), result.gpr());
1774 // If we add a DataFormatBool, we should use it here.
1775 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1776 jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
1779 void SpeculativeJIT::compilePeepHoleInt52Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1781 BasicBlock* taken = branchNode->branchData()->taken.block;
1782 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1784 // The branch instruction will branch to the taken block.
1785 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1786 if (taken == nextBlock()) {
1787 condition = JITCompiler::invert(condition);
1788 BasicBlock* tmp = taken;
1793 SpeculateWhicheverInt52Operand op1(this, node->child1());
1794 SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
1796 branch64(condition, op1.gpr(), op2.gpr(), taken);
1800 void SpeculativeJIT::compileDoubleCompare(Node* node, MacroAssembler::DoubleCondition condition)
1802 SpeculateDoubleOperand op1(this, node->child1());
1803 SpeculateDoubleOperand op2(this, node->child2());
1804 GPRTemporary result(this);
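// Start with the boxed true value; if the double comparison fails, flipping the low bit
// turns ValueTrue into ValueFalse (the two encodings differ only in bit 0).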
1806 m_jit.move(TrustedImm32(ValueTrue), result.gpr());
1807 MacroAssembler::Jump trueCase = m_jit.branchDouble(condition, op1.fpr(), op2.fpr());
1808 m_jit.xor64(TrustedImm32(true), result.gpr());
1809 trueCase.link(&m_jit);
1811 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1814 void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
1816 JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
1817 GPRTemporary result(this);
1818 GPRReg valueGPR = value.gpr();
1819 GPRReg resultGPR = result.gpr();
1820 GPRTemporary structure;
1821 GPRReg structureGPR = InvalidGPRReg;
1822 GPRTemporary scratch;
1823 GPRReg scratchGPR = InvalidGPRReg;
1825 bool masqueradesAsUndefinedWatchpointValid =
1826 masqueradesAsUndefinedWatchpointIsStillValid();
1828 if (!masqueradesAsUndefinedWatchpointValid) {
1829 // The masquerades as undefined case will use the structure register, so allocate it here.
1830 // Do this at the top of the function to avoid branching around a register allocation.
1831 GPRTemporary realStructure(this);
1832 GPRTemporary realScratch(this);
1833 structure.adopt(realStructure);
1834 scratch.adopt(realScratch);
1835 structureGPR = structure.gpr();
1836 scratchGPR = scratch.gpr();
1839 MacroAssembler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
1840 if (masqueradesAsUndefinedWatchpointValid) {
1842 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
1845 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
1847 MacroAssembler::Jump isNotMasqueradesAsUndefined =
1849 MacroAssembler::Zero,
1850 MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()),
1851 MacroAssembler::TrustedImm32(MasqueradesAsUndefined));
1853 m_jit.emitLoadStructure(valueGPR, structureGPR, scratchGPR);
1854 speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
1856 MacroAssembler::Equal,
1857 MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
1858 MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
1860 isNotMasqueradesAsUndefined.link(&m_jit);
1862 m_jit.move(TrustedImm32(ValueFalse), resultGPR);
1863 MacroAssembler::Jump done = m_jit.jump();
1865 notCell.link(&m_jit);
1867 if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
1868 m_jit.move(valueGPR, resultGPR);
1869 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
1871 JSValueRegs(valueGPR), nodeUse, SpecCell | SpecOther, m_jit.branch64(
1872 MacroAssembler::NotEqual,
1874 MacroAssembler::TrustedImm64(ValueNull)));
1876 m_jit.move(TrustedImm32(ValueTrue), resultGPR);
1880 jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean);
1883 void SpeculativeJIT::compileLogicalNot(Node* node)
1885 switch (node->child1().useKind()) {
1886 case ObjectOrOtherUse: {
1887 compileObjectOrOtherLogicalNot(node->child1());
1892 SpeculateInt32Operand value(this, node->child1());
1893 GPRTemporary result(this, Reuse, value);
1894 m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), result.gpr());
1895 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
1896 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1900 case DoubleRepUse: {
1901 SpeculateDoubleOperand value(this, node->child1());
1902 FPRTemporary scratch(this);
1903 GPRTemporary result(this);
1904 m_jit.move(TrustedImm32(ValueFalse), result.gpr());
1905 MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr());
1906 m_jit.xor32(TrustedImm32(true), result.gpr());
1907 nonZero.link(&m_jit);
1908 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1913 case KnownBooleanUse: {
1914 if (!needsTypeCheck(node->child1(), SpecBoolean)) {
1915 SpeculateBooleanOperand value(this, node->child1());
1916 GPRTemporary result(this, Reuse, value);
1918 m_jit.move(value.gpr(), result.gpr());
1919 m_jit.xor64(TrustedImm32(true), result.gpr());
1921 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1925 JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
1926 GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
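// XOR-ing the operand with ValueFalse maps false to 0 and true to 1; any other bit being set
// means the operand was not a boolean, which the type check below catches. XOR-ing the 0/1
// with ValueTrue then produces the boxed, negated boolean.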
1928 m_jit.move(value.gpr(), result.gpr());
1929 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
1931 JSValueRegs(value.gpr()), node->child1(), SpecBoolean, m_jit.branchTest64(
1932 JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
1933 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.gpr());
1935 // If we add a DataFormatBool, we should use it here.
1936 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
1941 JSValueOperand arg1(this, node->child1());
1942 GPRTemporary result(this);
1944 GPRReg arg1GPR = arg1.gpr();
1945 GPRReg resultGPR = result.gpr();
1947 FPRTemporary valueFPR(this);
1948 FPRTemporary tempFPR(this);
1950 bool shouldCheckMasqueradesAsUndefined = !masqueradesAsUndefinedWatchpointIsStillValid();
1951 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
1952 std::optional<GPRTemporary> scratch;
1953 GPRReg scratchGPR = InvalidGPRReg;
1954 if (shouldCheckMasqueradesAsUndefined) {
1955 scratch.emplace(this);
1956 scratchGPR = scratch->gpr();
1958 bool negateResult = true;
1959 m_jit.emitConvertValueToBoolean(JSValueRegs(arg1GPR), resultGPR, scratchGPR, valueFPR.fpr(), tempFPR.fpr(), shouldCheckMasqueradesAsUndefined, globalObject, negateResult);
1960 m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
1961 jsValueResult(resultGPR, node, DataFormatJSBoolean);
1965 return compileStringZeroLength(node);
1967 case StringOrOtherUse:
1968 return compileLogicalNotStringOrOther(node);
1971 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
1976 void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
1978 JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
1979 GPRTemporary scratch(this);
1980 GPRTemporary structure;
1981 GPRReg valueGPR = value.gpr();
1982 GPRReg scratchGPR = scratch.gpr();
1983 GPRReg structureGPR = InvalidGPRReg;
1985 if (!masqueradesAsUndefinedWatchpointIsStillValid()) {
1986 GPRTemporary realStructure(this);
1987 structure.adopt(realStructure);
1988 structureGPR = structure.gpr();
1991 MacroAssembler::Jump notCell = m_jit.branchIfNotCell(JSValueRegs(valueGPR));
1992 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1994 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
1997 JSValueRegs(valueGPR), nodeUse, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(valueGPR));
1999 JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
2001 MacroAssembler::Address(valueGPR, JSCell::typeInfoFlagsOffset()),
2002 TrustedImm32(MasqueradesAsUndefined));
2004 m_jit.emitLoadStructure(valueGPR, structureGPR, scratchGPR);
2005 speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse,
2007 MacroAssembler::Equal,
2008 MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
2009 MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
2011 isNotMasqueradesAsUndefined.link(&m_jit);
2013 jump(taken, ForceJump);
2015 notCell.link(&m_jit);
2017 if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
2018 m_jit.move(valueGPR, scratchGPR);
2019 m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
2021 JSValueRegs(valueGPR), nodeUse, SpecCell | SpecOther, m_jit.branch64(
2022 MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImm64(ValueNull)));
2026 noResult(m_currentNode);
2029 void SpeculativeJIT::emitBranch(Node* node)
2031 BasicBlock* taken = node->branchData()->taken.block;
2032 BasicBlock* notTaken = node->branchData()->notTaken.block;
2034 switch (node->child1().useKind()) {
2035 case ObjectOrOtherUse: {
2036 emitObjectOrOtherBranch(node->child1(), taken, notTaken);
2041 case DoubleRepUse: {
2042 if (node->child1().useKind() == Int32Use) {
2043 bool invert = false;
2045 if (taken == nextBlock()) {
2047 BasicBlock* tmp = taken;
2052 SpeculateInt32Operand value(this, node->child1());
2053 branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken);
2055 SpeculateDoubleOperand value(this, node->child1());
2056 FPRTemporary scratch(this);
2057 branchDoubleNonZero(value.fpr(), scratch.fpr(), taken);
2067 emitStringBranch(node->child1(), taken, notTaken);
2071 case StringOrOtherUse: {
2072 emitStringOrOtherBranch(node->child1(), taken, notTaken);
2078 case KnownBooleanUse: {
2079 JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
2080 GPRReg valueGPR = value.gpr();
2082 if (node->child1().useKind() == BooleanUse || node->child1().useKind() == KnownBooleanUse) {
2083 if (!needsTypeCheck(node->child1(), SpecBoolean)) {
2084 MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
2086 if (taken == nextBlock()) {
2087 condition = MacroAssembler::Zero;
2088 BasicBlock* tmp = taken;
2093 branchTest32(condition, valueGPR, TrustedImm32(true), taken);
2096 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
2097 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
2099 typeCheck(JSValueRegs(valueGPR), node->child1(), SpecBoolean, m_jit.jump());
2103 GPRTemporary result(this);
2104 FPRTemporary fprValue(this);
2105 FPRTemporary fprTemp(this);
2106 std::optional<GPRTemporary> scratch;
2108 GPRReg scratchGPR = InvalidGPRReg;
2109 bool shouldCheckMasqueradesAsUndefined = !masqueradesAsUndefinedWatchpointIsStillValid();
2110 if (shouldCheckMasqueradesAsUndefined) {
2111 scratch.emplace(this);
2112 scratchGPR = scratch->gpr();
2115 GPRReg resultGPR = result.gpr();
2116 FPRReg valueFPR = fprValue.fpr();
2117 FPRReg tempFPR = fprTemp.fpr();
2119 if (node->child1()->prediction() & SpecInt32Only) {
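// Fast path for likely-int32 operands: a boxed int32 zero is the only falsy int32, and any
// value that is unsigned-greater-or-equal to TagTypeNumber is some boxed int32, hence truthy
// once zero has been ruled out.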
2120 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(0))), notTaken);
2121 branch64(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister, taken);
2124 if (node->child1()->prediction() & SpecBoolean) {
2125 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), notTaken);
2126 branch64(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), taken);
2131 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
2132 m_jit.emitConvertValueToBoolean(JSValueRegs(valueGPR), resultGPR, scratchGPR, valueFPR, tempFPR, shouldCheckMasqueradesAsUndefined, globalObject);
2134 branchTest32(MacroAssembler::NonZero, resultGPR, taken);
2138 noResult(node, UseChildrenCalledExplicitly);
2143 DFG_CRASH(m_jit.graph(), m_currentNode, "Bad use kind");
2147 void SpeculativeJIT::compile(Node* node)
2149 NodeType op = node->op();
2151 #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
2152 m_jit.clearRegisterAllocationOffsets();
2157 case DoubleConstant:
2159 case PhantomDirectArguments:
2160 case PhantomClonedArguments:
2161 initConstantInfo(node);
2164 case LazyJSConstant:
2165 compileLazyJSConstant(node);
2169 speculate(node, node->child1());
2170 switch (node->child1().useKind()) {
2172 case DoubleRepRealUse:
2173 case DoubleRepAnyIntUse: {
2174 SpeculateDoubleOperand op(this, node->child1());
2175 FPRTemporary scratch(this, op);
2176 m_jit.moveDouble(op.fpr(), scratch.fpr());
2177 doubleResult(scratch.fpr(), node);
2181 SpeculateInt52Operand op(this, node->child1());
2182 GPRTemporary result(this, Reuse, op);
2183 m_jit.move(op.gpr(), result.gpr());
2184 int52Result(result.gpr(), node);
2188 JSValueOperand op(this, node->child1());
2189 GPRTemporary result(this, Reuse, op);
2190 m_jit.move(op.gpr(), result.gpr());
2191 jsValueResult(result.gpr(), node);
2199 AbstractValue& value = m_state.variables().operand(node->local());
2201 // If the CFA is tracking this variable and it found that the variable
2202 // cannot have been assigned, then don't attempt to proceed.
2203 if (value.isClear()) {
2204 m_compileOkay = false;
2208 switch (node->variableAccessData()->flushFormat()) {
2209 case FlushedDouble: {
2210 FPRTemporary result(this);
2211 m_jit.loadDouble(JITCompiler::addressFor(node->machineLocal()), result.fpr());
2212 VirtualRegister virtualRegister = node->virtualRegister();
2213 m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble);
2214 generationInfoFromVirtualRegister(virtualRegister).initDouble(node, node->refCount(), result.fpr());
2218 case FlushedInt32: {
2219 GPRTemporary result(this);
2220 m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
2222 // Like int32Result, but don't useChildren - our children are phi nodes,
2223 // and don't represent values within this dataflow with virtual registers.
2224 VirtualRegister virtualRegister = node->virtualRegister();
2225 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger);
2226 generationInfoFromVirtualRegister(virtualRegister).initInt32(node, node->refCount(), result.gpr());
2230 case FlushedInt52: {
2231 GPRTemporary result(this);
2232 m_jit.load64(JITCompiler::addressFor(node->machineLocal()), result.gpr());
2234 VirtualRegister virtualRegister = node->virtualRegister();
2235 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
2236 generationInfoFromVirtualRegister(virtualRegister).initInt52(node, node->refCount(), result.gpr());
2241 GPRTemporary result(this);
2242 m_jit.load64(JITCompiler::addressFor(node->machineLocal()), result.gpr());
2244 // Like jsValueResult, but don't useChildren - our children are phi nodes,
2245 // and don't represent values within this dataflow with virtual registers.
2246 VirtualRegister virtualRegister = node->virtualRegister();
2247 m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
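// Refine the data format using what the CFA proved about this local, so later uses can avoid
// redundant cell or boolean checks.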
2250 if (isCellSpeculation(value.m_type))
2251 format = DataFormatJSCell;
2252 else if (isBooleanSpeculation(value.m_type))
2253 format = DataFormatJSBoolean;
2255 format = DataFormatJS;
2257 generationInfoFromVirtualRegister(virtualRegister).initJSValue(node, node->refCount(), result.gpr(), format);
2263 case GetLocalUnlinked: {
2264 GPRTemporary result(this);
2266 m_jit.load64(JITCompiler::addressFor(node->unlinkedMachineLocal()), result.gpr());
2268 jsValueResult(result.gpr(), node);
2273 compileMovHint(m_currentNode);
2279 recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
2290 switch (node->variableAccessData()->flushFormat()) {
2291 case FlushedDouble: {
2292 SpeculateDoubleOperand value(this, node->child1());
2293 m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->machineLocal()));
2295 // Indicate that it's no longer necessary to retrieve the value of
2296 // this bytecode variable from registers or other locations in the stack,
2297 // but that it is stored as a double.
2298 recordSetLocal(DataFormatDouble);
2302 case FlushedInt32: {
2303 SpeculateInt32Operand value(this, node->child1());
2304 m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal()));
2306 recordSetLocal(DataFormatInt32);
2310 case FlushedInt52: {
2311 SpeculateInt52Operand value(this, node->child1());
2312 m_jit.store64(value.gpr(), JITCompiler::addressFor(node->machineLocal()));
2314 recordSetLocal(DataFormatInt52);
2319 SpeculateCellOperand cell(this, node->child1());
2320 GPRReg cellGPR = cell.gpr();
2321 m_jit.store64(cellGPR, JITCompiler::addressFor(node->machineLocal()));
2323 recordSetLocal(DataFormatCell);
2327 case FlushedBoolean: {
2328 SpeculateBooleanOperand boolean(this, node->child1());
2329 m_jit.store64(boolean.gpr(), JITCompiler::addressFor(node->machineLocal()));
2331 recordSetLocal(DataFormatBoolean);
2335 case FlushedJSValue: {
2336 JSValueOperand value(this, node->child1());
2337 m_jit.store64(value.gpr(), JITCompiler::addressFor(node->machineLocal()));
2339 recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
2344 DFG_CRASH(m_jit.graph(), node, "Bad flush format");
2352 // This is a no-op; it just marks the fact that the argument is being used.
2353 // But it may be profitable to use this as a hook to run speculation checks
2354 // on arguments, thereby allowing us to trivially eliminate such checks if
2355 // the argument is not used.
2356 recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
2362 compileBitwiseOp(node);
2368 compileShiftOp(node);
2371 case UInt32ToNumber: {
2372 compileUInt32ToNumber(node);
2376 case DoubleAsInt32: {
2377 compileDoubleAsInt32(node);
2381 case ValueToInt32: {
2382 compileValueToInt32(node);
2387 compileDoubleRep(node);
2392 compileValueRep(node);
2397 switch (node->child1().useKind()) {
2399 SpeculateInt32Operand operand(this, node->child1());
2400 GPRTemporary result(this, Reuse, operand);
2402 m_jit.signExtend32ToPtr(operand.gpr(), result.gpr());
2404 strictInt52Result(result.gpr(), node);
2409 GPRTemporary result(this);
2410 GPRReg resultGPR = result.gpr();
2412 convertAnyInt(node->child1(), resultGPR);
2414 strictInt52Result(resultGPR, node);
2418 case DoubleRepAnyIntUse: {
2419 SpeculateDoubleOperand value(this, node->child1());
2420 FPRReg valueFPR = value.fpr();
2422 GPRFlushedCallResult result(this);
2423 GPRReg resultGPR = result.gpr();
2427 callOperation(operationConvertDoubleToInt52, resultGPR, valueFPR);
2429 DFG_TYPE_CHECK_WITH_EXIT_KIND(Int52Overflow,
2430 JSValueRegs(), node->child1(), SpecAnyIntAsDouble,
2432 JITCompiler::Equal, resultGPR,
2433 JITCompiler::TrustedImm64(JSValue::notInt52)));
2435 strictInt52Result(resultGPR, node);
2440 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
2446 compileValueAdd(node);
2450 JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
2451 JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
2452 JSValueOperand op3(this, node->child3(), ManualOperandSpeculation);
2454 GPRReg op1GPR = op1.gpr();
2455 GPRReg op2GPR = op2.gpr();
2460 op3GPR = InvalidGPRReg;
2464 GPRFlushedCallResult result(this);
2466 callOperation(operationStrCat3, result.gpr(), op1GPR, op2GPR, op3GPR);
2468 callOperation(operationStrCat2, result.gpr(), op1GPR, op2GPR);
2469 m_jit.exceptionCheck();
2471 cellResult(result.gpr(), node);
2476 compileArithAdd(node);
2480 compileArithClz32(node);
2484 compileMakeRope(node);
2488 compileArithSub(node);
2492 compileArithNegate(node);
2496 compileArithMul(node);
2500 compileArithDiv(node);
2505 compileArithMod(node);
2510 compileArithAbs(node);
2515 switch (node->binaryUseKind()) {
2517 SpeculateStrictInt32Operand op1(this, node->child1());
2518 SpeculateStrictInt32Operand op2(this, node->child2());
2519 GPRTemporary result(this, Reuse, op1);
2521 MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1.gpr(), op2.gpr());
2522 m_jit.move(op2.gpr(), result.gpr());
2523 if (op1.gpr() != result.gpr()) {
2524 MacroAssembler::Jump done = m_jit.jump();
2525 op1Less.link(&m_jit);
2526 m_jit.move(op1.gpr(), result.gpr());
2529 op1Less.link(&m_jit);
2531 int32Result(result.gpr(), node);
2535 case DoubleRepUse: {
2536 SpeculateDoubleOperand op1(this, node->child1());
2537 SpeculateDoubleOperand op2(this, node->child2());
2538 FPRTemporary result(this, op1);
2540 FPRReg op1FPR = op1.fpr();
2541 FPRReg op2FPR = op2.fpr();
2542 FPRReg resultFPR = result.fpr();
2544 MacroAssembler::JumpList done;
2546 MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1FPR, op2FPR);
2548 // op2 is either the lesser one, or one of them is NaN.
2549 MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1FPR, op2FPR);
2551 // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
2552 // op1 + op2 and putting it into result.
2553 m_jit.addDouble(op1FPR, op2FPR, resultFPR);
2554 done.append(m_jit.jump());
2556 op2Less.link(&m_jit);
2557 m_jit.moveDouble(op2FPR, resultFPR);
2559 if (op1FPR != resultFPR) {
2560 done.append(m_jit.jump());
2562 op1Less.link(&m_jit);
2563 m_jit.moveDouble(op1FPR, resultFPR);
2565 op1Less.link(&m_jit);
2569 doubleResult(resultFPR, node);
2574 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
2581 compileArithPow(node);
2585 compileArithSqrt(node);
2589 compileArithFRound(node);
2593 compileArithRandom(node);
2600 compileArithRounding(node);
2604 compileArithSin(node);
2608 compileArithCos(node);
2612 compileArithTan(node);
2616 compileArithLog(node);
2620 compileLogicalNot(node);
2624 if (compare(node, JITCompiler::LessThan, JITCompiler::DoubleLessThan, operationCompareLess))
2629 if (compare(node, JITCompiler::LessThanOrEqual, JITCompiler::DoubleLessThanOrEqual, operationCompareLessEq))
2633 case CompareGreater:
2634 if (compare(node, JITCompiler::GreaterThan, JITCompiler::DoubleGreaterThan, operationCompareGreater))
2638 case CompareGreaterEq:
2639 if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq))
2644 if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq))
2648 case CompareStrictEq:
2649 if (compileStrictEq(node))
2654 compileCompareEqPtr(node);
2657 case StringCharCodeAt: {
2658 compileGetCharCodeAt(node);
2662 case StringCharAt: {
2663 // Relies on the StringCharAt node having the same basic layout as GetByVal.
2664 compileGetByValOnString(node);
2668 case StringFromCharCode: {
2669 compileFromCharCode(node);
2679 case ArrayifyToStructure: {
2685 switch (node->arrayMode().type()) {
2686 case Array::SelectUsingPredictions:
2687 case Array::ForceExit:
2688 DFG_CRASH(m_jit.graph(), node, "Bad array mode type");
2690 case Array::Undecided: {
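// An Undecided array has never been stored into, so any load from a non-negative index just
// yields undefined; the only speculation needed is that the index is not negative.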
2691 SpeculateStrictInt32Operand index(this, node->child2());
2692 GPRTemporary result(this, Reuse, index);
2693 GPRReg indexGPR = index.gpr();
2694 GPRReg resultGPR = result.gpr();
2696 speculationCheck(OutOfBounds, JSValueRegs(), node,
2697 m_jit.branch32(MacroAssembler::LessThan, indexGPR, MacroAssembler::TrustedImm32(0)));
2699 use(node->child1());
2702 m_jit.move(MacroAssembler::TrustedImm64(ValueUndefined), resultGPR);
2703 jsValueResult(resultGPR, node, UseChildrenCalledExplicitly);
2706 case Array::Generic: {
2707 JSValueOperand base(this, node->child1());
2708 JSValueOperand property(this, node->child2());
2709 GPRReg baseGPR = base.gpr();
2710 GPRReg propertyGPR = property.gpr();
2713 GPRFlushedCallResult result(this);
2714 callOperation(operationGetByVal, result.gpr(), baseGPR, propertyGPR);
2715 m_jit.exceptionCheck();
2717 jsValueResult(result.gpr(), node);
2721 case Array::Contiguous: {
2722 if (node->arrayMode().isInBounds()) {
2723 SpeculateStrictInt32Operand property(this, node->child2());
2724 StorageOperand storage(this, node->child3());
2726 GPRReg propertyReg = property.gpr();
2727 GPRReg storageReg = storage.gpr();
2732 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2734 GPRTemporary result(this);
2735 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.gpr());
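// A hole in contiguous storage is the empty JSValue, which encodes as zero, so a zero test
// distinguishes holes from real values.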
2736 if (node->arrayMode().isSaneChain()) {
2737 ASSERT(node->arrayMode().type() == Array::Contiguous);
2738 JITCompiler::Jump notHole = m_jit.branchTest64(
2739 MacroAssembler::NonZero, result.gpr());
2740 m_jit.move(TrustedImm64(JSValue::encode(jsUndefined())), result.gpr());
2741 notHole.link(&m_jit);
2744 LoadFromHole, JSValueRegs(), 0,
2745 m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
2747 jsValueResult(result.gpr(), node, node->arrayMode().type() == Array::Int32 ? DataFormatJSInt32 : DataFormatJS);
2751 SpeculateCellOperand base(this, node->child1());
2752 SpeculateStrictInt32Operand property(this, node->child2());
2753 StorageOperand storage(this, node->child3());
2755 GPRReg baseReg = base.gpr();
2756 GPRReg propertyReg = property.gpr();
2757 GPRReg storageReg = storage.gpr();
2762 GPRTemporary result(this);
2763 GPRReg resultReg = result.gpr();
2765 MacroAssembler::JumpList slowCases;
2767 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2769 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
2770 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg));
2772 addSlowPathGenerator(
2774 slowCases, this, operationGetByValArrayInt,
2775 result.gpr(), baseReg, propertyReg));
2777 jsValueResult(resultReg, node);
2781 case Array::Double: {
2782 if (node->arrayMode().isInBounds()) {
2783 SpeculateStrictInt32Operand property(this, node->child2());
2784 StorageOperand storage(this, node->child3());
2786 GPRReg propertyReg = property.gpr();
2787 GPRReg storageReg = storage.gpr();
2792 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2794 FPRTemporary result(this);
2795 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.fpr());
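// Holes in double storage are represented by NaN (PNaN). Unless the array mode guarantees a
// sane prototype chain, a NaN load means we read a hole and must take the speculation check.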
2796 if (!node->arrayMode().isSaneChain())
2797 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, result.fpr(), result.fpr()));
2798 doubleResult(result.fpr(), node);
2802 SpeculateCellOperand base(this, node->child1());
2803 SpeculateStrictInt32Operand property(this, node->child2());
2804 StorageOperand storage(this, node->child3());
2806 GPRReg baseReg = base.gpr();
2807 GPRReg propertyReg = property.gpr();
2808 GPRReg storageReg = storage.gpr();
2813 GPRTemporary result(this);
2814 FPRTemporary temp(this);
2815 GPRReg resultReg = result.gpr();
2816 FPRReg tempReg = temp.fpr();
2818 MacroAssembler::JumpList slowCases;
2820 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2822 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), tempReg);
2823 slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempReg, tempReg));
2824 boxDouble(tempReg, resultReg);
2826 addSlowPathGenerator(
2828 slowCases, this, operationGetByValArrayInt,
2829 result.gpr(), baseReg, propertyReg));
2831 jsValueResult(resultReg, node);
2835 case Array::ArrayStorage:
2836 case Array::SlowPutArrayStorage: {
2837 if (node->arrayMode().isInBounds()) {
2838 SpeculateStrictInt32Operand property(this, node->child2());
2839 StorageOperand storage(this, node->child3());
2841 GPRReg propertyReg = property.gpr();
2842 GPRReg storageReg = storage.gpr();
2847 speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
2849 GPRTemporary result(this);
2850 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr());
2851 speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchTest64(MacroAssembler::Zero, result.gpr()));
2853 jsValueResult(result.gpr(), node);
2857 SpeculateCellOperand base(this, node->child1());
2858 SpeculateStrictInt32Operand property(this, node->child2());
2859 StorageOperand storage(this, node->child3());
2861 GPRReg baseReg = base.gpr();
2862 GPRReg propertyReg = property.gpr();
2863 GPRReg storageReg = storage.gpr();
2868 GPRTemporary result(this);
2869 GPRReg resultReg = result.gpr();
2871 MacroAssembler::JumpList slowCases;
2873 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
2875 m_jit.load64(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), resultReg);
2876 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, resultReg));
2878 addSlowPathGenerator(
2880 slowCases, this, operationGetByValArrayInt,
2881 result.gpr(), baseReg, propertyReg));
2883 jsValueResult(resultReg, node);
2887 compileGetByValOnString(node);
2889 case Array::DirectArguments:
2890 compileGetByValOnDirectArguments(node);
2892 case Array::ScopedArguments:
2893 compileGetByValOnScopedArguments(node);
2896 TypedArrayType type = node->arrayMode().typedArrayType();
2898 compileGetByValOnIntTypedArray(node, type);
2900 compileGetByValOnFloatTypedArray(node, type);
2905 case GetByValWithThis: {
2906 JSValueOperand base(this, node->child1());
2907 GPRReg baseGPR = base.gpr();
2908 JSValueOperand thisValue(this, node->child2());
2909 GPRReg thisValueGPR = thisValue.gpr();
2910 JSValueOperand subscript(this, node->child3());
2911 GPRReg subscriptGPR = subscript.gpr();
2913 GPRFlushedCallResult result(this);
2914 GPRReg resultGPR = result.gpr();
2917 callOperation(operationGetByValWithThis, resultGPR, baseGPR, thisValueGPR, subscriptGPR);
2918 m_jit.exceptionCheck();
2920 jsValueResult(resultGPR, node);
2924 case PutByValDirect:
2926 case PutByValAlias: {
2927 Edge child1 = m_jit.graph().varArgChild(node, 0);
2928 Edge child2 = m_jit.graph().varArgChild(node, 1);
2929 Edge child3 = m_jit.graph().varArgChild(node, 2);
2930 Edge child4 = m_jit.graph().varArgChild(node, 3);
2932 ArrayMode arrayMode = node->arrayMode().modeForPut();
2933 bool alreadyHandled = false;
2935 switch (arrayMode.type()) {
2936 case Array::SelectUsingPredictions:
2937 case Array::ForceExit:
2938 DFG_CRASH(m_jit.graph(), node, "Bad array mode type");
2940 case Array::Generic: {
2941 DFG_ASSERT(m_jit.graph(), node, node->op() == PutByVal || node->op() == PutByValDirect);
2943 JSValueOperand arg1(this, child1);
2944 JSValueOperand arg2(this, child2);
2945 JSValueOperand arg3(this, child3);
2946 GPRReg arg1GPR = arg1.gpr();
2947 GPRReg arg2GPR = arg2.gpr();
2948 GPRReg arg3GPR = arg3.gpr();
2950 if (node->op() == PutByValDirect)
2951 callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict, arg1GPR, arg2GPR, arg3GPR);
2953 callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR);
2954 m_jit.exceptionCheck();
2957 alreadyHandled = true;
2967 // FIXME: the base may not be necessary for some array access modes. But we have to
2968 // keep it alive to this point, so it's likely to be in a register anyway. Likely
2969 // no harm in locking it here.
2970 SpeculateCellOperand base(this, child1);
2971 SpeculateStrictInt32Operand property(this, child2);
2973 GPRReg baseReg = base.gpr();
2974 GPRReg propertyReg = property.gpr();
2976 switch (arrayMode.type()) {
2978 case Array::Contiguous: {
2979 JSValueOperand value(this, child3, ManualOperandSpeculation);
2981 GPRReg valueReg = value.gpr();
2986 if (arrayMode.type() == Array::Int32) {
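// Int32 arrays may only hold boxed int32s. In the 64-bit encoding a value is an int32 exactly
// when it is unsigned-greater-or-equal to TagTypeNumber, so anything Below that fails the check.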
2988 JSValueRegs(valueReg), child3, SpecInt32Only,
2990 MacroAssembler::Below, valueReg, GPRInfo::tagTypeNumberRegister));
2993 StorageOperand storage(this, child4);
2994 GPRReg storageReg = storage.gpr();
2996 if (node->op() == PutByValAlias) {
2997 // Store the value to the array.
2998 GPRReg propertyReg = property.gpr();
2999 GPRReg valueReg = value.gpr();
3000 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
3006 GPRTemporary temporary;
3007 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
3009 MacroAssembler::Jump slowCase;
3011 if (arrayMode.isInBounds()) {
3013 OutOfBounds, JSValueRegs(), 0,
3014 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
3016 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
3018 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
3020 if (!arrayMode.isOutOfBounds())
3021 speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
3023 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
3024 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
3026 inBounds.link(&m_jit);
3029 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
3036 if (arrayMode.isOutOfBounds()) {
3037 if (node->op() == PutByValDirect) {
3038 addSlowPathGenerator(slowPathCall(
3040 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict,
3041 NoResult, baseReg, propertyReg, valueReg));
3043 addSlowPathGenerator(slowPathCall(
3045 m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
3046 NoResult, baseReg, propertyReg, valueReg));
3050 noResult(node, UseChildrenCalledExplicitly);
3054 case Array::Double: {
3055 compileDoublePutByVal(node, base, property);
3059 case Array::ArrayStorage:
3060 case Array::SlowPutArrayStorage: {
3061 JSValueOperand value(this, child3);
3063 GPRReg valueReg = value.gpr();
3068 StorageOperand storage(this, child4);
3069 GPRReg storageReg = storage.gpr();
3071 if (node->op() == PutByValAlias) {
3072 // Store the value to the array.
3073 GPRReg propertyReg = property.gpr();
3074 GPRReg valueReg = value.gpr();
3075 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
3081 GPRTemporary temporary;
3082 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
3084 MacroAssembler::JumpList slowCases;
3086 MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
3087 if (!arrayMode.isOutOfBounds())
3088 speculationCheck(OutOfBounds, JSValueRegs(), 0, beyondArrayBounds);
3090 slowCases.append(beyondArrayBounds);
3092 // Check if we're writing to a hole; if so, increment m_numValuesInVector.
3093 if (arrayMode.isInBounds()) {
3095 StoreToHole, JSValueRegs(), 0,
3096 m_jit.branchTest64(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))));
3098 MacroAssembler::Jump notHoleValue = m_jit.branchTest64(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
3099 if (arrayMode.isSlowPut()) {
3100 // This is sort of strange. If we wanted to optimize this code path, we would invert
3101 // the above branch. But it's simply not worth it since this only happens if we're
3102 // already having a bad time.
3103 slowCases.append(m_jit.jump());
3105 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset()));
3107 // If we're writing to a hole we might be growing the array, so update the length if needed.
3108 MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
3109 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
3110 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
3112 lengthDoesNotNeedUpdate.link(&m_jit);
3114 notHoleValue.link(&m_jit);
3117 // Store the value to the array.
3118 m_jit.store64(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
3125 if (!slowCases.empty()) {
3126 if (node->op() == PutByValDirect) {
3127 addSlowPathGenerator(slowPathCall(
3129 m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict,
3130 NoResult, baseReg, propertyReg, valueReg));
3132 addSlowPathGenerator(slowPathCall(
3134 m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
3135 NoResult, baseReg, propertyReg, valueReg));
3139 noResult(node, UseChildrenCalledExplicitly);
3144 TypedArrayType type = arrayMode.typedArrayType();
3146 compilePutByValForIntTypedArray(base.gpr(), property.gpr(), node, type);
3148 compilePutByValForFloatTypedArray(base.gpr(), property.gpr(), node, type);
3155 bool sample = false;
3158 m_jit.incrementSuperSamplerCount();
3160 SpeculateCellOperand globalObject(this, node->child1());
3161 GPRReg globalObjectGPR = globalObject.gpr();
3163 if (node->child2().useKind() == RegExpObjectUse) {
3164 if (node->child3().useKind() == StringUse) {
3165 SpeculateCellOperand base(this, node->child2());
3166 SpeculateCellOperand argument(this, node->child3());
3167 GPRReg baseGPR = base.gpr();
3168 GPRReg argumentGPR = argument.gpr();
3169 speculateRegExpObject(node->child2(), baseGPR);
3170 speculateString(node->child3(), argumentGPR);
3173 GPRFlushedCallResult result(this);
3174 callOperation(operationRegExpExecString, result.gpr(), globalObjectGPR, baseGPR, argumentGPR);
3175 m_jit.exceptionCheck();
3177 jsValueResult(result.gpr(), node);
3180 m_jit.decrementSuperSamplerCount();
3184 SpeculateCellOperand base(this, node->child2());
3185 JSValueOperand argument(this, node->child3());
3186 GPRReg baseGPR = base.gpr();
3187 GPRReg argumentGPR = argument.gpr();
3188 speculateRegExpObject(node->child2(), baseGPR);
3191 GPRFlushedCallResult result(this);
3192 callOperation(operationRegExpExec, result.gpr(), globalObjectGPR, baseGPR, argumentGPR);
3193 m_jit.exceptionCheck();
3195 jsValueResult(result.gpr(), node);
3198 m_jit.decrementSuperSamplerCount();
3202 JSValueOperand base(this, node->child2());
3203 JSValueOperand argument(this, node->child3());
3204 GPRReg baseGPR = base.gpr();
3205 GPRReg argumentGPR = argument.gpr();
3208 GPRFlushedCallResult result(this);
3209 callOperation(operationRegExpExecGeneric, result.gpr(), globalObjectGPR, baseGPR, argumentGPR);
3210 m_jit.exceptionCheck();
3212 jsValueResult(result.gpr(), node);
3215 m_jit.decrementSuperSamplerCount();
3220 SpeculateCellOperand globalObject(this, node->child1());
3221 GPRReg globalObjectGPR = globalObject.gpr();
3223 if (node->child2().useKind() == RegExpObjectUse) {
3224 if (node->child3().useKind() == StringUse) {
3225 SpeculateCellOperand base(this, node->child2());
3226 SpeculateCellOperand argument(this, node->child3());
3227 GPRReg baseGPR = base.gpr();
3228 GPRReg argumentGPR = argument.gpr();
3229 speculateRegExpObject(node->child2(), baseGPR);
3230 speculateString(node->child3(), argumentGPR);
3233 GPRFlushedCallResult result(this);
3234 callOperation(operationRegExpTestString, result.gpr(), globalObjectGPR, baseGPR, argumentGPR);
3235 m_jit.exceptionCheck();
3237 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
3238 jsValueResult(result.gpr(), node);
3242 SpeculateCellOperand base(this, node->child2());
3243 JSValueOperand argument(this, node->child3());
3244 GPRReg baseGPR = base.gpr();
3245 GPRReg argumentGPR = argument.gpr();
3246 speculateRegExpObject(node->child2(), baseGPR);
3249 GPRFlushedCallResult result(this);
3250 callOperation(operationRegExpTest, result.gpr(), globalObjectGPR, baseGPR, argumentGPR);
3251 m_jit.exceptionCheck();
3253 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
3254 jsValueResult(result.gpr(), node);
3258 JSValueOperand base(this, node->child2());
3259 JSValueOperand argument(this, node->child3());
3260 GPRReg baseGPR = base.gpr();
3261 GPRReg argumentGPR = argument.gpr();
3264 GPRFlushedCallResult result(this);
3265 callOperation(operationRegExpTestGeneric, result.gpr(), globalObjectGPR, baseGPR, argumentGPR);
3266 m_jit.exceptionCheck();
3268 m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
3269 jsValueResult(result.gpr(), node, DataFormatJSBoolean);
3274 case StringReplaceRegExp: {
3275 bool sample = false;
3278 m_jit.incrementSuperSamplerCount();
3280 if (node->child1().useKind() == StringUse
3281 && node->child2().useKind() == RegExpObjectUse
3282 && node->child3().useKind() == StringUse) {
3283 if (JSString* replace = node->child3()->dynamicCastConstant<JSString*>()) {
3284 if (!replace->length()) {
3285 SpeculateCellOperand string(this, node->child1());
3286 SpeculateCellOperand regExp(this, node->child2());
3287 GPRReg stringGPR = string.gpr();
3288 GPRReg regExpGPR = regExp.gpr();
3289 speculateString(node->child1(), stringGPR);
3290 speculateRegExpObject(node->child2(), regExpGPR);
3293 GPRFlushedCallResult result(this);
3295 operationStringProtoFuncReplaceRegExpEmptyStr, result.gpr(), stringGPR,
3297 m_jit.exceptionCheck();
3298 cellResult(result.gpr(), node);
3300 m_jit.decrementSuperSamplerCount();
3305 SpeculateCellOperand string(this, node->child1());
3306 SpeculateCellOperand regExp(this, node->child2());
3307 SpeculateCellOperand replace(this, node->child3());
3308 GPRReg stringGPR = string.gpr();
3309 GPRReg regExpGPR = regExp.gpr();
3310 GPRReg replaceGPR = replace.gpr();
3311 speculateString(node->child1(), stringGPR);
3312 speculateRegExpObject(node->child2(), regExpGPR);
3313 speculateString(node->child3(), replaceGPR);
3316 GPRFlushedCallResult result(this);
3318 operationStringProtoFuncReplaceRegExpString, result.gpr(), stringGPR, regExpGPR,
3320 m_jit.exceptionCheck();
3321 cellResult(result.gpr(), node);
3323 m_jit.decrementSuperSamplerCount();
3327 // If we fixed up the edge of child2, we inserted a Check(@child2, String).
3328 OperandSpeculationMode child2SpeculationMode = AutomaticOperandSpeculation;
3329 if (node->child2().useKind() == StringUse)
3330 child2SpeculationMode = ManualOperandSpeculation;
3332 JSValueOperand string(this, node->child1());
3333 JSValueOperand search(this, node->child2(), child2SpeculationMode);
3334 JSValueOperand replace(this, node->child3());
3335 GPRReg stringGPR = string.gpr();
3336 GPRReg searchGPR = search.gpr();
3337 GPRReg replaceGPR = replace.gpr();
3340 GPRFlushedCallResult result(this);
3342 operationStringProtoFuncReplaceGeneric, result.gpr(), stringGPR, searchGPR,
3344 m_jit.exceptionCheck();
3345 cellResult(result.gpr(), node);
3347 m_jit.decrementSuperSamplerCount();
3351 case GetRegExpObjectLastIndex: {
3352 compileGetRegExpObjectLastIndex(node);
3356 case SetRegExpObjectLastIndex: {
3357 compileSetRegExpObjectLastIndex(node);
3361 case RecordRegExpCachedResult: {
3362 compileRecordRegExpCachedResult(node);
3367 ASSERT(node->arrayMode().isJSArray());
3369 SpeculateCellOperand base(this, node->child1());
3370 GPRTemporary storageLength(this);
3372 GPRReg baseGPR = base.gpr();
3373 GPRReg storageLengthGPR = storageLength.gpr();
3375 StorageOperand storage(this, node->child3());
3376 GPRReg storageGPR = storage.gpr();
3378 switch (node->arrayMode().type()) {
3380 case Array::Contiguous: {
3381 JSValueOperand value(this, node->child2(), ManualOperandSpeculation);
3382 GPRReg valueGPR = value.gpr();
3384 if (node->arrayMode().type() == Array::Int32) {
3386 JSValueRegs(valueGPR), node->child2(), SpecInt32Only,
3388 MacroAssembler::Below, valueGPR, GPRInfo::tagTypeNumberRegister));
3391 m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
3392 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
3393 m_jit.store64(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
3394 m_jit.add32(TrustedImm32(1), storageLengthGPR);
3395 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
3396 m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
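// push() returns the new length; OR-ing in TagTypeNumber boxes the raw int32 length as a
// JSValue int32 before handing it to jsValueResult.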
3398 addSlowPathGenerator(
3400 slowPath, this, operationArrayPush, storageLengthGPR,
3401 valueGPR, baseGPR));
3403 jsValueResult(storageLengthGPR, node);
3407 case Array::Double: {
3408 SpeculateDoubleOperand value(this, node->child2());
3409 FPRReg valueFPR = value.fpr();
3412 JSValueRegs(), node->child2(), SpecDoubleReal,
3413 m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, valueFPR, valueFPR));
3415 m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
3416 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
3417 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
3418 m_jit.add32(TrustedImm32(1), storageLengthGPR);
3419 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
3420 m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
3422 addSlowPathGenerator(
3424 slowPath, this, operationArrayPushDouble, storageLengthGPR,
3425 valueFPR, baseGPR));
3427 jsValueResult(storageLengthGPR, node);
3431 case Array::ArrayStorage: {
3432 JSValueOperand value(this, node->child2());
3433 GPRReg valueGPR = value.gpr();
3435 m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
3437 // Refuse to handle bizarre lengths.
3438 speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe)));
3440 MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
3442 m_jit.store64(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
3444 m_jit.add32(TrustedImm32(1), storageLengthGPR);
3445 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
3446 m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
3447 m_jit.or64(GPRInfo::tagTypeNumberRegister, storageLengthGPR);
3449 addSlowPathGenerator(
3451 slowPath, this, operationArrayPush, NoResult, storageLengthGPR,
3452 valueGPR, baseGPR));
3454 jsValueResult(storageLengthGPR, node);
3466 ASSERT(node->arrayMode().isJSArray());
3468 SpeculateCellOperand base(this, node->child1());
3469 StorageOperand storage(this, node->child2());
3470 GPRTemporary value(this);
3471 GPRTemporary storageLength(this);
3472 FPRTemporary temp(this); // This is kind of lame, since we don't always need it. I'm relying on the fact that we don't have FPR pressure, especially in code that uses pop().
3474 GPRReg baseGPR = base.gpr();
3475 GPRReg storageGPR = storage.gpr();
3476 GPRReg valueGPR = value.gpr();
3477 GPRReg storageLengthGPR = storageLength.gpr();
3478 FPRReg tempFPR = temp.fpr();
3480 switch (node->arrayMode().type()) {
3483 case Array::Contiguous: {
3485 MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
3486 MacroAssembler::Jump undefinedCase =
3487 m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
3488 m_jit.sub32(TrustedImm32(1), storageLengthGPR);
3490 storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
3491 MacroAssembler::Jump slowCase;
3492 if (node->arrayMode().type() == Array::Double) {
3494 MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight),
3496 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
3497 // length and the new length.
3499 MacroAssembler::TrustedImm64(bitwise_cast<int64_t>(PNaN)), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
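// PNaN is the hole marker for double storage: storing it back leaves the popped slot as a
// hole, and the NaN self-compare below detects whether the slot we just read was itself a hole.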
3500 slowCase = m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempFPR, tempFPR);
3501 boxDouble(tempFPR, valueGPR);
3504 MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight),
3506 // FIXME: This would not have to be here if changing the publicLength also zeroed the values between the old
3507 // length and the new length.
3509 MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
3510 slowCase = m_jit.branchTest64(MacroAssembler::Zero, valueGPR);
3513 addSlowPathGenerator(
3515 undefinedCase, this,
3516 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR));
3517 addSlowPathGenerator(
3519 slowCase, this, operationArrayPopAndRecoverLength, valueGPR, baseGPR));
3521 // We can't know for sure that the result is an int because of the slow paths. :-/
3522 jsValueResult(valueGPR, node);
3526 case Array::ArrayStorage: {
3527 m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
3529 JITCompiler::Jump undefinedCase =
3530 m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
3532 m_jit.sub32(TrustedImm32(1), storageLengthGPR);
3534 JITCompiler::JumpList slowCases;
3535 slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())));
3537 m_jit.load64(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR);
3538 slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, valueGPR));
3540 m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
3542 m_jit.store64(MacroAssembler::TrustedImm64((int64_t)0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
3543 m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
3545 addSlowPathGenerator(
3547 undefinedCase, this,
3548 MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), valueGPR));
3550 addSlowPathGenerator(
3552 slowCases, this, operationArrayPop, valueGPR, baseGPR));
3554 jsValueResult(valueGPR, node);
3566 jump(node->targetBlock());
3580 ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT1);
3581 ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR);
3582 ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister);
3584 // Return the result in returnValueGPR.
3585 JSValueOperand op1(this, node->child1());
3586 m_jit.move(op1.gpr(), GPRInfo::returnValueGPR);
3588 m_jit.emitRestoreCalleeSaves();
3589 m_jit.emitFunctionEpilogue();
3597 case ThrowStaticError: {
3598 // We expect that throw statements are rare and are intended to exit the code block
3599 // anyway, so we just OSR back to the old JIT for now.
3600 terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
3604 case BooleanToNumber: {
3605 switch (node->child1().useKind()) {
3607 JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
3608 GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
3610 m_jit.move(value.gpr(), result.gpr());
3611 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
3613 JSValueRegs(value.gpr()), node->child1(), SpecBoolean, m_jit.branchTest64(
3614 JITCompiler::NonZero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
3616 int32Result(result.gpr(), node);
3621 JSValueOperand value(this, node->child1());
3622 GPRTemporary result(this);
3624 if (!m_interpreter.needsTypeCheck(node->child1(), SpecBoolInt32 | SpecBoolean)) {
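// If the abstract interpreter proved the operand is a boolean or an int32 that is already
// 0 or 1 (SpecBoolInt32), masking the low bit produces the numeric result directly.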
3625 m_jit.move(value.gpr(), result.gpr());
3626 m_jit.and32(TrustedImm32(1), result.gpr());
3627 int32Result(result.gpr(), node);
3631 m_jit.move(value.gpr(), result.gpr());
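// XOR-ing with ValueFalse leaves 0 or 1 exactly when the operand is a boolean; in that case
// OR-ing in TagTypeNumber boxes the result as an int32. Non-booleans are passed through
// unchanged as JSValues.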
3632 m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.gpr());
3633 JITCompiler::Jump isBoolean = m_jit.branchTest64(
3634 JITCompiler::Zero, result.gpr(), TrustedImm32(static_cast<int32_t>(~1)));
3635 m_jit.move(value.gpr(), result.gpr());
3636 JITCompiler::Jump done = m_jit.jump();
3637 isBoolean.link(&m_jit);
3638 m_jit.or64(GPRInfo::tagTypeNumberRegister, result.gpr());
3641 jsValueResult(result.gpr(), node);
3646 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
3653 DFG_ASSERT(m_jit.graph(), node, node->child1().useKind() == UntypedUse);
3654 JSValueOperand argument(this, node->child1());
3655 GPRTemporary result(this, Reuse, argument);
3657 GPRReg argumentGPR = argument.gpr();
3658 GPRReg resultGPR = result.gpr();