#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
#include "DFGMayExit.h"
+#include "DFGOSRExitFuzz.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "DirectArguments.h"
+#include "JITAddGenerator.h"
+#include "JITSubGenerator.h"
+#include "JSArrowFunction.h"
#include "JSCInlines.h"
#include "JSEnvironmentRecord.h"
#include "JSLexicalEnvironment.h"
, m_interpreter(m_jit.graph(), m_state)
, m_stream(&jit.jitCode()->variableEventStream)
, m_minifiedGraph(&jit.jitCode()->minifiedDFG)
- , m_isCheckingArgumentTypes(false)
{
}
GPRInfo::callFrameRegister, startGPR);
}
+// Emits an OSR-exit fuzzing hook: bumps a global counter of executed speculation
+// checks and, once the configured threshold is hit, returns a jump that forces
+// the exit to fire. Returns an unset Jump when fuzzing is disabled.
+MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
+{
+ if (!doOSRExitFuzzing())
+ return MacroAssembler::Jump();
+
+ MacroAssembler::Jump result;
+
+ // regT0 is borrowed as scratch for the counter update; preserve its value.
+ m_jit.pushToSave(GPRInfo::regT0);
+ m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
+ m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
+ m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
+ unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
+ unsigned at = Options::fireOSRExitFuzzAt();
+ if (at || atOrAfter) {
+ unsigned threshold;
+ MacroAssembler::RelationalCondition condition;
+ if (atOrAfter) {
+ // Fire on every check once the counter reaches the threshold.
+ threshold = atOrAfter;
+ condition = MacroAssembler::Below;
+ } else {
+ // Fire exactly when the counter equals the threshold.
+ threshold = at;
+ condition = MacroAssembler::NotEqual;
+ }
+ // "ok" is taken when the exit should NOT be forced.
+ MacroAssembler::Jump ok = m_jit.branch32(
+ condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
+ // Firing path: restore regT0 before jumping off to the exit.
+ m_jit.popToRestore(GPRInfo::regT0);
+ result = m_jit.jump();
+ ok.link(&m_jit);
+ }
+ // Non-firing path: restore regT0 and fall through to normal code.
+ m_jit.popToRestore(GPRInfo::regT0);
+
+ return result;
+}
+
// Registers a speculation check: if jumpToFail is taken at runtime, execution
// OSR-exits with |kind| attributed to |node|'s value profile.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
if (!m_compileOkay)
return;
- ASSERT(m_isCheckingArgumentTypes || m_canExit);
- m_jit.appendExitInfo(jumpToFail);
+ // When OSR exit fuzzing is active, the fuzz jump is routed to the same exit
+ // so a forced exit exercises this check's recovery path.
+ JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
+ if (fuzzJump.isSet()) {
+ JITCompiler::JumpList jumpsToFail;
+ jumpsToFail.append(fuzzJump);
+ jumpsToFail.append(jumpToFail);
+ m_jit.appendExitInfo(jumpsToFail);
+ } else
+ m_jit.appendExitInfo(jumpToFail);
m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
{
if (!m_compileOkay)
return;
- ASSERT(m_isCheckingArgumentTypes || m_canExit);
- m_jit.appendExitInfo(jumpsToFail);
+ JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
+ if (fuzzJump.isSet()) {
+ JITCompiler::JumpList myJumpsToFail;
+ myJumpsToFail.append(jumpsToFail);
+ myJumpsToFail.append(fuzzJump);
+ m_jit.appendExitInfo(myJumpsToFail);
+ } else
+ m_jit.appendExitInfo(jumpsToFail);
m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
{
if (!m_compileOkay)
return OSRExitJumpPlaceholder();
- ASSERT(m_isCheckingArgumentTypes || m_canExit);
unsigned index = m_jit.jitCode()->osrExit.size();
m_jit.appendExitInfo();
m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
// Edge-based convenience overload; delegates to the Node* variant.
OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
- ASSERT(m_isCheckingArgumentTypes || m_canExit);
return speculationCheck(kind, jsValueSource, nodeUse.node());
}
// Edge-based convenience overload; delegates to the Node* variant.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
- ASSERT(m_isCheckingArgumentTypes || m_canExit);
speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}
// Edge-based convenience overload taking a list of failure jumps.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
- ASSERT(m_isCheckingArgumentTypes || m_canExit);
speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}
{
if (!m_compileOkay)
return;
- ASSERT(m_isCheckingArgumentTypes || m_canExit);
unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
m_jit.appendExitInfo(jumpToFail);
m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
// Edge-based convenience overload carrying a SpeculationRecovery, which lets
// the exit undo partially-committed work before reconstructing bytecode state.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
- ASSERT(m_isCheckingArgumentTypes || m_canExit);
speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}
{
if (!m_compileOkay)
return;
- ASSERT(m_canExit);
OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
m_jit.jitCode()->appendOSRExit(OSRExit(
UncountableInvalidation, JSValueSource(),
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
- ASSERT(m_isCheckingArgumentTypes || m_canExit);
if (!m_compileOkay)
return;
speculationCheck(kind, jsValueRegs, node, m_jit.jump());
// Edge-based convenience overload; delegates to the Node* variant, which emits
// an unconditional OSR exit (speculation has failed for the rest of this block).
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
- ASSERT(m_isCheckingArgumentTypes || m_canExit);
terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}
result.set(fpr);
}
- result.merge(RegisterSet::specialRegisters());
+ result.merge(RegisterSet::stubUnavailableRegisters());
return result;
}
case Array::Contiguous:
return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
+ case Array::Undecided:
+ return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, UndecidedShape);
+
case Array::ArrayStorage:
case Array::SlowPutArrayStorage: {
ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
const ClassInfo* expectedClassInfo = 0;
switch (node->arrayMode().type()) {
+ case Array::AnyTypedArray:
case Array::String:
RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
break;
case Array::Int32:
case Array::Double:
case Array::Contiguous:
+ case Array::Undecided:
case Array::ArrayStorage:
case Array::SlowPutArrayStorage: {
GPRTemporary temp(this);
if (JSString* string = node->child1()->dynamicCastConstant<JSString*>()) {
if (string->tryGetValueImpl() && string->tryGetValueImpl()->isAtomic()) {
- StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
+ StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo(AccessType::In);
GPRTemporary result(this);
GPRReg resultGPR = result.gpr();
MacroAssembler::PatchableJump jump = m_jit.patchableJump();
MacroAssembler::Label done = m_jit.label();
+ // Since this block is executed only when the result of string->tryGetValueImpl() is atomic,
+ // we can cast it to const AtomicStringImpl* safely.
auto slowPath = slowPathCall(
jump.m_jump, this, operationInOptimize,
JSValueRegs::payloadOnly(resultGPR), stubInfo, baseGPR,
- string->tryGetValueImpl());
+ static_cast<const AtomicStringImpl*>(string->tryGetValueImpl()));
+ stubInfo->callSiteIndex = m_jit.addCallSite(node->origin.semantic);
stubInfo->codeOrigin = node->origin.semantic;
stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
+#if USE(JSVALUE32_64)
+ stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
+ stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
+#endif
stubInfo->patch.usedRegisters = usedRegisters();
- stubInfo->patch.spillMode = NeedToSpill;
m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
addSlowPathGenerator(WTF::move(slowPath));
callOperation(
operationGenericIn, extractResult(JSValueRegs::payloadOnly(resultGPR)),
baseGPR, regs);
+ m_jit.exceptionCheck();
blessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}
m_fpr = m_jit->reuse(op1.fpr());
else if (m_jit->canReuse(op2.node()))
m_fpr = m_jit->reuse(op2.fpr());
+ else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
+ m_fpr = m_jit->reuse(op1.fpr());
else
m_fpr = m_jit->fprAllocate();
}
}
if (node->isBinaryUseKind(BooleanUse))
compilePeepHoleBooleanBranch(node, branchNode, condition);
+ else if (node->isBinaryUseKind(SymbolUse))
+ compilePeepHoleSymbolEquality(node, branchNode);
else if (node->isBinaryUseKind(ObjectUse))
compilePeepHoleObjectEquality(node, branchNode);
else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
+ else if (!needsTypeCheck(node->child1(), SpecOther))
+ nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
+ else if (!needsTypeCheck(node->child2(), SpecOther))
+ nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
else {
nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
return true;
variable->machineLocal(),
format));
}
-
- m_codeOriginForExitTarget = CodeOrigin();
- m_codeOriginForExitProfile = CodeOrigin();
+
+ m_origin = NodeOrigin();
for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
m_currentNode = m_block->at(m_indexInBlock);
return;
}
- if (ASSERT_DISABLED)
- m_canExit = true; // Essentially disable the assertions.
- else
- m_canExit = mayExit(m_jit.graph(), m_currentNode);
-
m_interpreter.startExecuting();
m_jit.setForNode(m_currentNode);
- m_codeOriginForExitTarget = m_currentNode->origin.forExit;
- m_codeOriginForExitProfile = m_currentNode->origin.semantic;
+ m_origin = m_currentNode->origin;
+ if (validationEnabled())
+ m_origin.exitOK &= mayExit(m_jit.graph(), m_currentNode) == Exits;
m_lastGeneratedNode = m_currentNode->op();
ASSERT(m_currentNode->shouldGenerate());
dataLog("\n");
}
+ m_jit.jitAssertNoException();
+
compile(m_currentNode);
if (belongsInMinifiedGraph(m_currentNode->op()))
void SpeculativeJIT::checkArgumentTypes()
{
ASSERT(!m_currentNode);
- m_isCheckingArgumentTypes = true;
- m_codeOriginForExitTarget = CodeOrigin(0);
- m_codeOriginForExitProfile = CodeOrigin(0);
+ m_origin = NodeOrigin(CodeOrigin(0), CodeOrigin(0), true);
for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
Node* node = m_jit.graph().m_arguments[i];
}
#endif
}
- m_isCheckingArgumentTypes = false;
+
+ m_origin = NodeOrigin();
}
bool SpeculativeJIT::compile()
m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
}
ASSERT(osrEntryIndex == m_osrEntryHeads.size());
+
+ if (verboseCompilationEnabled()) {
+ DumpContext dumpContext;
+ dataLog("OSR Entries:\n");
+ for (OSREntryData& entryData : m_jit.jitCode()->osrEntry)
+ dataLog(" ", inContext(entryData, &dumpContext), "\n");
+ if (!dumpContext.isEmpty())
+ dumpContext.dump(WTF::dataFile());
+ }
}
void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
GPRReg gpr = result.gpr();
JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
- addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
+ addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
int32Result(gpr, node);
return;
void SpeculativeJIT::compileDoubleRep(Node* node)
{
switch (node->child1().useKind()) {
+ case RealNumberUse: {
+ JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
+ FPRTemporary result(this);
+
+ JSValueRegs op1Regs = op1.jsValueRegs();
+ FPRReg resultFPR = result.fpr();
+
+#if USE(JSVALUE64)
+ GPRTemporary temp(this);
+ GPRReg tempGPR = temp.gpr();
+ m_jit.move(op1Regs.gpr(), tempGPR);
+ m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
+#else
+ FPRTemporary temp(this);
+ FPRReg tempFPR = temp.fpr();
+ unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
+#endif
+
+ JITCompiler::Jump done = m_jit.branchDouble(
+ JITCompiler::DoubleEqual, resultFPR, resultFPR);
+
+ DFG_TYPE_CHECK(
+ op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
+ m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);
+
+ done.link(&m_jit);
+
+ doubleResult(resultFPR, node);
+ return;
+ }
+
+ case NotCellUse:
case NumberUse: {
ASSERT(!node->child1()->isNumberConstant()); // This should have been constant folded.
-
- if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
+
+ SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
+ if (isInt32Speculation(possibleTypes)) {
SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
FPRTemporary result(this);
m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
doubleResult(result.fpr(), node);
return;
}
-
+
JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
FPRTemporary result(this);
-
+
#if USE(JSVALUE64)
GPRTemporary temp(this);
GPRReg op1GPR = op1.gpr();
GPRReg tempGPR = temp.gpr();
FPRReg resultFPR = result.fpr();
-
+ JITCompiler::JumpList done;
+
JITCompiler::Jump isInteger = m_jit.branch64(
MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
-
- if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
+
+ if (node->child1().useKind() == NotCellUse) {
+ JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, op1GPR, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isUndefined = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueUndefined));
+
+ static const double zero = 0;
+ m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
+
+ JITCompiler::Jump isNull = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueNull));
+ done.append(isNull);
+
+ DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCell,
+ m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(static_cast<int32_t>(TagBitBool))));
+
+ JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(ValueFalse));
+ static const double one = 1;
+ m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
+ done.append(m_jit.jump());
+ done.append(isFalse);
+
+ isUndefined.link(&m_jit);
+ static const double NaN = PNaN;
+ m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
+ done.append(m_jit.jump());
+
+ isNumber.link(&m_jit);
+ } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
typeCheck(
JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
m_jit.move(op1GPR, tempGPR);
unboxDouble(tempGPR, resultFPR);
- JITCompiler::Jump done = m_jit.jump();
+ done.append(m_jit.jump());
isInteger.link(&m_jit);
m_jit.convertInt32ToDouble(op1GPR, resultFPR);
GPRReg op1PayloadGPR = op1.payloadGPR();
FPRReg tempFPR = temp.fpr();
FPRReg resultFPR = result.fpr();
+ JITCompiler::JumpList done;
JITCompiler::Jump isInteger = m_jit.branch32(
MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
-
- if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
+
+ if (node->child1().useKind() == NotCellUse) {
+ JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
+ JITCompiler::Jump isUndefined = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::UndefinedTag));
+
+ static const double zero = 0;
+ m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&zero), resultFPR);
+
+ JITCompiler::Jump isNull = m_jit.branch32(JITCompiler::Equal, op1TagGPR, TrustedImm32(JSValue::NullTag));
+ done.append(isNull);
+
+ DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branch32(JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::BooleanTag)));
+
+ JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
+ static const double one = 1;
+ m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&one), resultFPR);
+ done.append(m_jit.jump());
+ done.append(isFalse);
+
+ isUndefined.link(&m_jit);
+ static const double NaN = PNaN;
+ m_jit.loadDouble(MacroAssembler::TrustedImmPtr(&NaN), resultFPR);
+ done.append(m_jit.jump());
+
+ isNumber.link(&m_jit);
+ } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
typeCheck(
JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
}
-
+
unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
- JITCompiler::Jump done = m_jit.jump();
+ done.append(m_jit.jump());
isInteger.link(&m_jit);
m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
MacroAssembler::Jump failed = m_jit.branchTruncateDoubleToInt32(
fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
- addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
+ addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
fixed.link(&m_jit);
value.adopt(result);
blessedBooleanResult(scratchReg, node);
}
-void SpeculativeJIT::compileAdd(Node* node)
+void SpeculativeJIT::compileValueAdd(Node* node)
+{
+ if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node())) {
+ JSValueOperand left(this, node->child1());
+ JSValueOperand right(this, node->child2());
+ JSValueRegs leftRegs = left.jsValueRegs();
+ JSValueRegs rightRegs = right.jsValueRegs();
+#if USE(JSVALUE64)
+ GPRTemporary result(this);
+ JSValueRegs resultRegs = JSValueRegs(result.gpr());
+#else
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+ JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
+#endif
+ flushRegisters();
+ callOperation(operationValueAddNotNumber, resultRegs, leftRegs, rightRegs);
+ m_jit.exceptionCheck();
+
+ jsValueResult(resultRegs, node);
+ return;
+ }
+
+ bool leftIsConstInt32 = node->child1()->isInt32Constant();
+ bool rightIsConstInt32 = node->child2()->isInt32Constant();
+
+ // The DFG does not always fold the sum of 2 constant int operands together.
+ if (leftIsConstInt32 && rightIsConstInt32) {
+#if USE(JSVALUE64)
+ GPRTemporary result(this);
+ JSValueRegs resultRegs = JSValueRegs(result.gpr());
+#else
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+ JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
+#endif
+ int64_t leftConst = node->child1()->asInt32();
+ int64_t rightConst = node->child2()->asInt32();
+ int64_t resultConst = leftConst + rightConst;
+ m_jit.moveValue(JSValue(resultConst), resultRegs);
+ jsValueResult(resultRegs, node);
+ return;
+ }
+
+ Optional<JSValueOperand> left;
+ Optional<JSValueOperand> right;
+
+ JSValueRegs leftRegs;
+ JSValueRegs rightRegs;
+
+ FPRTemporary leftNumber(this);
+ FPRTemporary rightNumber(this);
+ FPRReg leftFPR = leftNumber.fpr();
+ FPRReg rightFPR = rightNumber.fpr();
+
+#if USE(JSVALUE64)
+ GPRTemporary result(this);
+ JSValueRegs resultRegs = JSValueRegs(result.gpr());
+ GPRTemporary scratch(this);
+ GPRReg scratchGPR = scratch.gpr();
+ FPRReg scratchFPR = InvalidFPRReg;
+#else
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+ JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
+ GPRReg scratchGPR = resultTag.gpr();
+ FPRTemporary fprScratch(this);
+ FPRReg scratchFPR = fprScratch.fpr();
+#endif
+
+ ResultType leftType = m_state.forNode(node->child1()).resultType();
+ ResultType rightType = m_state.forNode(node->child2()).resultType();
+ int32_t leftConstInt32 = 0;
+ int32_t rightConstInt32 = 0;
+
+ ASSERT(!leftIsConstInt32 || !rightIsConstInt32);
+
+ if (leftIsConstInt32) {
+ leftConstInt32 = node->child1()->asInt32();
+ right = JSValueOperand(this, node->child2());
+ rightRegs = right->jsValueRegs();
+ } else if (rightIsConstInt32) {
+ left = JSValueOperand(this, node->child1());
+ leftRegs = left->jsValueRegs();
+ rightConstInt32 = node->child2()->asInt32();
+ } else {
+ left = JSValueOperand(this, node->child1());
+ leftRegs = left->jsValueRegs();
+ right = JSValueOperand(this, node->child2());
+ rightRegs = right->jsValueRegs();
+ }
+
+ JITAddGenerator gen(resultRegs, leftRegs, rightRegs, leftType, rightType,
+ leftIsConstInt32, rightIsConstInt32, leftConstInt32, rightConstInt32,
+ leftFPR, rightFPR, scratchGPR, scratchFPR);
+ gen.generateFastPath(m_jit);
+
+ ASSERT(gen.didEmitFastPath());
+ gen.endJumpList().append(m_jit.jump());
+
+ gen.slowPathJumpList().link(&m_jit);
+
+ silentSpillAllRegisters(resultRegs);
+
+ if (leftIsConstInt32) {
+ leftRegs = resultRegs;
+ int64_t leftConst = node->child1()->asInt32();
+ m_jit.moveValue(JSValue(leftConst), leftRegs);
+ } else if (rightIsConstInt32) {
+ rightRegs = resultRegs;
+ int64_t rightConst = node->child2()->asInt32();
+ m_jit.moveValue(JSValue(rightConst), rightRegs);
+ }
+
+ callOperation(operationValueAdd, resultRegs, leftRegs, rightRegs);
+
+ silentFillAllRegisters(resultRegs);
+ m_jit.exceptionCheck();
+
+ gen.endJumpList().link(&m_jit);
+ jsValueResult(resultRegs, node);
+ return;
+}
+
+void SpeculativeJIT::compileArithAdd(Node* node)
{
switch (node->binaryUseKind()) {
case Int32Use: {
case Int32Use: {
ASSERT(!shouldCheckNegativeZero(node->arithMode()));
- if (node->child2()->isNumberConstant()) {
+ if (node->child2()->isInt32Constant()) {
SpeculateInt32Operand op1(this, node->child1());
int32_t imm2 = node->child2()->asInt32();
GPRTemporary result(this);
return;
}
- if (node->child1()->isNumberConstant()) {
+ if (node->child1()->isInt32Constant()) {
int32_t imm1 = node->child1()->asInt32();
SpeculateInt32Operand op2(this, node->child2());
GPRTemporary result(this);
doubleResult(result.fpr(), node);
return;
}
-
+
+ case UntypedUse: {
+ JSValueOperand left(this, node->child1());
+ JSValueOperand right(this, node->child2());
+
+ JSValueRegs leftRegs = left.jsValueRegs();
+ JSValueRegs rightRegs = right.jsValueRegs();
+
+ ResultType leftType = m_state.forNode(node->child1()).resultType();
+ ResultType rightType = m_state.forNode(node->child2()).resultType();
+
+ FPRTemporary leftNumber(this);
+ FPRTemporary rightNumber(this);
+ FPRReg leftFPR = leftNumber.fpr();
+ FPRReg rightFPR = rightNumber.fpr();
+
+#if USE(JSVALUE64)
+ GPRTemporary result(this);
+ JSValueRegs resultRegs = JSValueRegs(result.gpr());
+ GPRTemporary scratch(this);
+ GPRReg scratchGPR = scratch.gpr();
+ FPRReg scratchFPR = InvalidFPRReg;
+#else
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+ JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
+ GPRReg scratchGPR = resultTag.gpr();
+ FPRTemporary fprScratch(this);
+ FPRReg scratchFPR = fprScratch.fpr();
+#endif
+
+ JITSubGenerator gen(resultRegs, leftRegs, rightRegs, leftType, rightType,
+ leftFPR, rightFPR, scratchGPR, scratchFPR);
+ gen.generateFastPath(m_jit);
+
+ ASSERT(gen.didEmitFastPath());
+ gen.endJumpList().append(m_jit.jump());
+
+ gen.slowPathJumpList().link(&m_jit);
+ silentSpillAllRegisters(resultRegs);
+ callOperation(operationValueSub, resultRegs, leftRegs, rightRegs);
+ silentFillAllRegisters(resultRegs);
+ m_jit.exceptionCheck();
+
+ gen.endJumpList().link(&m_jit);
+ jsValueResult(resultRegs, node);
+ return;
+ }
+
default:
RELEASE_ASSERT_NOT_REACHED();
return;
}
m_jit.move(op1GPR, eax.gpr());
- m_jit.assembler().cdq();
- m_jit.assembler().idivl_r(op2GPR);
+ m_jit.x86ConvertToDoubleWord32();
+ m_jit.x86Div32(op2GPR);
if (op2TempGPR != InvalidGPRReg)
unlock(op2TempGPR);
numeratorNonZero.link(&m_jit);
}
+ if (shouldCheckOverflow(node->arithMode()))
+ speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchTest32(MacroAssembler::Zero, op2GPR));
+
m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
// Check that there was no remainder. If there had been, then we'd be obligated to
if (node->child2()->isInt32Constant()) {
int32_t divisor = node->child2()->asInt32();
if (divisor > 1 && hasOneBitSet(divisor)) {
- unsigned logarithm = WTF::fastLog2(divisor);
+ unsigned logarithm = WTF::fastLog2(static_cast<uint32_t>(divisor));
GPRReg dividendGPR = op1.gpr();
GPRTemporary result(this);
GPRReg resultGPR = result.gpr();
m_jit.move(op1Gpr, eax.gpr());
m_jit.move(TrustedImm32(divisor), scratchGPR);
- m_jit.assembler().cdq();
- m_jit.assembler().idivl_r(scratchGPR);
+ m_jit.x86ConvertToDoubleWord32();
+ m_jit.x86Div32(scratchGPR);
if (shouldCheckNegativeZero(node->arithMode())) {
JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
}
m_jit.move(op1GPR, eax.gpr());
- m_jit.assembler().cdq();
- m_jit.assembler().idivl_r(op2GPR);
+ m_jit.x86ConvertToDoubleWord32();
+ m_jit.x86Div32(op2GPR);
if (op2TempGPR != InvalidGPRReg)
unlock(op2TempGPR);
FPRResult roundedResultAsDouble(this);
FPRReg resultFPR = roundedResultAsDouble.fpr();
callOperation(jsRound, resultFPR, valueFPR);
+ m_jit.exceptionCheck();
if (producesInteger(node->arithRoundingMode())) {
GPRTemporary roundedResultAsInt32(this);
FPRTemporary scratch(this);
SpeculateDoubleOperand op1(this, node->child1());
FPRReg op1FPR = op1.fpr();
- if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::enableArchitectureSpecificOptimizations()) {
+ if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::useArchitectureSpecificOptimizations()) {
flushRegisters();
FPRResult result(this);
callOperation(sqrt, result.fpr(), op1FPR);
compileStringIdentEquality(node);
return false;
}
+
+ if (node->isBinaryUseKind(SymbolUse)) {
+ compileSymbolEquality(node);
+ return false;
+ }
if (node->isBinaryUseKind(ObjectUse)) {
compileObjectEquality(node);
compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
return false;
}
+
+ if (!needsTypeCheck(node->child1(), SpecOther)) {
+ nonSpeculativeNonPeepholeCompareNullOrUndefined(node->child2());
+ return false;
+ }
+
+ if (!needsTypeCheck(node->child2(), SpecOther)) {
+ nonSpeculativeNonPeepholeCompareNullOrUndefined(node->child1());
+ return false;
+ }
}
-
+
nonSpeculativeNonPeepholeCompare(node, condition, operation);
return false;
}
bool SpeculativeJIT::compileStrictEq(Node* node)
{
+ // FIXME: Currently, we have op_jless, op_jgreater etc. But we don't have op_jeq, op_jstricteq etc.
+ // `==` and `===` operations with branching will be compiled to op_{eq,stricteq} and op_{jfalse,jtrue}.
+ // In DFG bytecodes, between op_eq and op_jfalse, we have MovHint to store the result of op_eq.
+ // As a result, detectPeepHoleBranch() never detects peep hole for that case.
+ // https://bugs.webkit.org/show_bug.cgi?id=149713
+
if (node->isBinaryUseKind(BooleanUse)) {
unsigned branchIndexInBlock = detectPeepHoleBranch();
if (branchIndexInBlock != UINT_MAX) {
compileDoubleCompare(node, MacroAssembler::DoubleEqual);
return false;
}
+
+ if (node->isBinaryUseKind(SymbolUse)) {
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_block->at(branchIndexInBlock);
+ compilePeepHoleSymbolEquality(node, branchNode);
+ use(node->child1());
+ use(node->child2());
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+ return true;
+ }
+ compileSymbolEquality(node);
+ return false;
+ }
if (node->isBinaryUseKind(StringUse)) {
compileStringEquality(node);
return false;
}
+ if (node->isBinaryUseKind(ObjectUse, UntypedUse)) {
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_block->at(branchIndexInBlock);
+ compilePeepHoleObjectStrictEquality(node->child1(), node->child2(), branchNode);
+ use(node->child1());
+ use(node->child2());
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+ return true;
+ }
+ compileObjectStrictEquality(node->child1(), node->child2());
+ return false;
+ }
+
+ if (node->isBinaryUseKind(UntypedUse, ObjectUse)) {
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_block->at(branchIndexInBlock);
+ compilePeepHoleObjectStrictEquality(node->child2(), node->child1(), branchNode);
+ use(node->child1());
+ use(node->child2());
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+ return true;
+ }
+ compileObjectStrictEquality(node->child2(), node->child1());
+ return false;
+ }
+
if (node->isBinaryUseKind(ObjectUse)) {
unsigned branchIndexInBlock = detectPeepHoleBranch();
if (branchIndexInBlock != UINT_MAX) {
unblessedBooleanResult(result.gpr(), node);
}
+// Speculates that both edges are Symbols, loads each Symbol's private-name
+// StringImpl pointer into a temp GPR, and hands the two pointers to |functor|.
+// Shared helper for symbol equality (Symbols are equal iff their private names
+// are pointer-identical).
+template<typename Functor>
+void SpeculativeJIT::extractStringImplFromBinarySymbols(Edge leftSymbolEdge, Edge rightSymbolEdge, const Functor& functor)
+{
+ SpeculateCellOperand left(this, leftSymbolEdge);
+ SpeculateCellOperand right(this, rightSymbolEdge);
+ GPRTemporary leftTemp(this);
+ GPRTemporary rightTemp(this);
+
+ GPRReg leftGPR = left.gpr();
+ GPRReg rightGPR = right.gpr();
+ GPRReg leftTempGPR = leftTemp.gpr();
+ GPRReg rightTempGPR = rightTemp.gpr();
+
+ // OSR-exits if either operand turns out not to be a Symbol.
+ speculateSymbol(leftSymbolEdge, leftGPR);
+ speculateSymbol(rightSymbolEdge, rightGPR);
+
+ m_jit.loadPtr(JITCompiler::Address(leftGPR, Symbol::offsetOfPrivateName()), leftTempGPR);
+ m_jit.loadPtr(JITCompiler::Address(rightGPR, Symbol::offsetOfPrivateName()), rightTempGPR);
+
+ functor(leftTempGPR, rightTempGPR);
+}
+
+// Symbol == Symbol: compare the private-name pointers and produce a boolean.
+void SpeculativeJIT::compileSymbolEquality(Node* node)
+{
+ extractStringImplFromBinarySymbols(node->child1(), node->child2(), [&] (GPRReg leftStringImpl, GPRReg rightStringImpl) {
+ // Result is materialized into leftStringImpl, which is then dead as a pointer.
+ m_jit.comparePtr(JITCompiler::Equal, leftStringImpl, rightStringImpl, leftStringImpl);
+ unblessedBooleanResult(leftStringImpl, node);
+ });
+}
+
+// Fused Symbol-equality + branch: compares private-name pointers and jumps
+// straight to the branch targets, skipping materialization of a boolean.
+void SpeculativeJIT::compilePeepHoleSymbolEquality(Node* node, Node* branchNode)
+{
+ BasicBlock* taken = branchNode->branchData()->taken.block;
+ BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+
+ extractStringImplFromBinarySymbols(node->child1(), node->child2(), [&] (GPRReg leftStringImpl, GPRReg rightStringImpl) {
+ if (taken == nextBlock()) {
+ // Invert the test so the fall-through edge reaches the next block.
+ branchPtr(JITCompiler::NotEqual, leftStringImpl, rightStringImpl, notTaken);
+ jump(taken);
+ } else {
+ branchPtr(JITCompiler::Equal, leftStringImpl, rightStringImpl, taken);
+ jump(notTaken);
+ }
+ });
+}
+
void SpeculativeJIT::compileStringEquality(
Node* node, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR, GPRReg leftTempGPR,
GPRReg rightTempGPR, GPRReg leftTemp2GPR, GPRReg rightTemp2GPR,
default:
ASSERT(isTypedView(node->arrayMode().typedArrayType()));
- m_jit.loadPtr(
- MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfVector()),
- storageReg);
+
+ JITCompiler::Jump fail = m_jit.loadTypedArrayVector(baseReg, storageReg);
+
+ addSlowPathGenerator(
+ slowPathCall(fail, this, operationGetArrayBufferVector, storageReg, baseReg));
break;
}
MacroAssembler::NotEqual,
MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfMode()),
TrustedImm32(WastefulTypedArray));
-
+
m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), dataGPR);
+ m_jit.removeSpaceBits(dataGPR);
m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfVector()), vectorGPR);
+ JITCompiler::JumpList vectorReady;
+ vectorReady.append(m_jit.branchIfToSpace(vectorGPR));
+ vectorReady.append(m_jit.branchIfNotFastTypedArray(baseGPR));
+ m_jit.removeSpaceBits(vectorGPR);
+ vectorReady.link(&m_jit);
m_jit.loadPtr(MacroAssembler::Address(dataGPR, Butterfly::offsetOfArrayBuffer()), dataGPR);
m_jit.loadPtr(MacroAssembler::Address(dataGPR, ArrayBuffer::offsetOfData()), dataGPR);
m_jit.subPtr(dataGPR, vectorGPR);
m_jit.move(TrustedImmPtr(0), vectorGPR);
done.link(&m_jit);
-
+
int32Result(vectorGPR, node);
}
cellResult(result.gpr(), node);
}
+
+// Loads the bound |this| value out of a JSArrowFunction cell.
+// NOTE(review): the loaded slot is treated as a cell (cellResult) — presumably
+// the stored |this| is always a cell here; confirm against JSArrowFunction.
+void SpeculativeJIT::compileLoadArrowFunctionThis(Node* node)
+{
+ SpeculateCellOperand function(this, node->child1());
+ GPRTemporary result(this, Reuse, function);
+ m_jit.loadPtr(JITCompiler::Address(function.gpr(), JSArrowFunction::offsetOfThisValue()), result.gpr());
+ cellResult(result.gpr(), node);
+}
+
void SpeculativeJIT::compileSkipScope(Node* node)
{
SpeculateCellOperand scope(this, node->child1());
break;
}
default: {
- ASSERT(isTypedView(node->arrayMode().typedArrayType()));
+ ASSERT(node->arrayMode().isSomeTypedArrayView());
SpeculateCellOperand base(this, node->child1());
GPRTemporary result(this, Reuse, base);
GPRReg baseGPR = base.gpr();
} }
}
+// Emits a check that the cell in child1 is exactly the symbol/string whose
+// UniquedStringImpl is node->uidOperand(), OSR-exiting with BadIdent on mismatch.
+void SpeculativeJIT::compileCheckIdent(Node* node)
+{
+ SpeculateCellOperand operand(this, node->child1());
+ UniquedStringImpl* uid = node->uidOperand();
+ if (uid->isSymbol()) {
+ speculateSymbol(node->child1(), operand.gpr());
+ // A Symbol matches iff its private name is the exact uid pointer.
+ speculationCheck(
+ BadIdent, JSValueSource(), nullptr,
+ m_jit.branchPtr(
+ JITCompiler::NotEqual,
+ JITCompiler::Address(operand.gpr(), Symbol::offsetOfPrivateName()),
+ TrustedImmPtr(uid)));
+ } else {
+ speculateString(node->child1(), operand.gpr());
+ // Narrow to an identifier string so its value impl can be compared by pointer.
+ speculateStringIdent(node->child1(), operand.gpr());
+ speculationCheck(
+ BadIdent, JSValueSource(), nullptr,
+ m_jit.branchPtr(
+ JITCompiler::NotEqual,
+ JITCompiler::Address(operand.gpr(), JSString::offsetOfValue()),
+ TrustedImmPtr(uid)));
+ }
+ noResult(node);
+}
+
+// Shared fast path for NewFunction/NewArrowFunction: inline-allocates a function
+// cell of the given structure/size (falling through to |slowPath| on allocation
+// failure) and initializes its scope chain, executable, and rare-data fields.
+// No write barriers are needed: the object was just fast-allocated, so it must
+// be young.
+template <typename ClassType> void SpeculativeJIT::compileNewFunctionCommon(GPRReg resultGPR, Structure* structure, GPRReg scratch1GPR, GPRReg scratch2GPR, GPRReg scopeGPR, MacroAssembler::JumpList& slowPath, size_t size, FunctionExecutable* executable, ptrdiff_t offsetOfScopeChain, ptrdiff_t offsetOfExecutable, ptrdiff_t offsetOfRareData)
+{
+ emitAllocateJSObjectWithKnownSize<ClassType>(resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratch1GPR, scratch2GPR, slowPath, size);
+
+ m_jit.storePtr(scopeGPR, JITCompiler::Address(resultGPR, offsetOfScopeChain));
+ m_jit.storePtr(TrustedImmPtr(executable), JITCompiler::Address(resultGPR, offsetOfExecutable));
+ // Rare data starts out null; it is created lazily elsewhere if ever needed.
+ m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, offsetOfRareData))";
+}
+
+// Compiles both NewFunction and NewArrowFunction. Arrow functions additionally
+// capture the |this| value from child2 and use a distinct structure and
+// allocation size; otherwise the two paths are parallel.
void SpeculativeJIT::compileNewFunction(Node* node)
{
+ NodeType nodeType = node->op();
+ ASSERT(nodeType == NewFunction || nodeType == NewArrowFunction);
+
SpeculateCellOperand scope(this, node->child1());
+#if USE(JSVALUE64)
+ GPRReg thisValueGPR;
+#else
+ GPRReg thisValuePayloadGPR;
+ GPRReg thisValueTagGPR;
+#endif
GPRReg scopeGPR = scope.gpr();
FunctionExecutable* executable = node->castOperand<FunctionExecutable*>();
+ if (nodeType == NewArrowFunction) {
+#if USE(JSVALUE64)
+ SpeculateCellOperand thisValue(this, node->child2());
+ thisValueGPR = thisValue.gpr();
+#else
+ JSValueOperand thisValue(this, node->child2(), ManualOperandSpeculation);
+ thisValuePayloadGPR = thisValue.payloadGPR();
+ thisValueTagGPR = thisValue.tagGPR();
+
+ // On 32-bit we speculate manually that the captured |this| is a cell.
+ DFG_TYPE_CHECK(thisValue.jsValueRegs(), node->child2(), SpecCell, m_jit.branchIfNotCell(thisValue.jsValueRegs()));
+#endif
+ // NOTE(review): thisValue goes out of scope here but thisValueGPR (and the
+ // tag/payload GPRs) are used further down. Confirm the operand's registers
+ // remain pinned after the operand is destroyed.
+ }
+
+ // While the singleton-function watchpoint is still valid we cannot inline
+ // the allocation; call out to the runtime instead.
if (executable->singletonFunction()->isStillValid()) {
GPRFlushedCallResult result(this);
GPRReg resultGPR = result.gpr();
-
+
flushRegisters();
-
- callOperation(operationNewFunction, resultGPR, scopeGPR, executable);
+
+ if (nodeType == NewArrowFunction)
+#if USE(JSVALUE64)
+ callOperation(operationNewArrowFunction, resultGPR, scopeGPR, executable, thisValueGPR);
+#else
+ callOperation(operationNewArrowFunction, resultGPR, scopeGPR, executable, thisValueTagGPR, thisValuePayloadGPR);
+#endif
+ else
+ callOperation(operationNewFunction, resultGPR, scopeGPR, executable);
+ m_jit.exceptionCheck();
cellResult(resultGPR, node);
return;
}
- Structure* structure = m_jit.graph().globalObjectFor(
- node->origin.semantic)->functionStructure();
-
+ Structure* structure = nodeType == NewArrowFunction
+ ? m_jit.graph().globalObjectFor(node->origin.semantic)->arrowFunctionStructure()
+ : m_jit.graph().globalObjectFor(node->origin.semantic)->functionStructure();
+
GPRTemporary result(this);
GPRTemporary scratch1(this);
GPRTemporary scratch2(this);
+
GPRReg resultGPR = result.gpr();
GPRReg scratch1GPR = scratch1.gpr();
GPRReg scratch2GPR = scratch2.gpr();
-
+
JITCompiler::JumpList slowPath;
- emitAllocateJSObjectWithKnownSize<JSFunction>(
- resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0),
- scratch1GPR, scratch2GPR, slowPath, JSFunction::allocationSize(0));
-
- // Don't need a memory barriers since we just fast-created the function, so it
- // must be young.
- m_jit.storePtr(
- scopeGPR,
- JITCompiler::Address(resultGPR, JSFunction::offsetOfScopeChain()));
- m_jit.storePtr(
- TrustedImmPtr(executable),
- JITCompiler::Address(resultGPR, JSFunction::offsetOfExecutable()));
- m_jit.storePtr(
- TrustedImmPtr(0),
- JITCompiler::Address(resultGPR, JSFunction::offsetOfRareData()));
-
-
- addSlowPathGenerator(
- slowPathCall(
- slowPath, this, operationNewFunctionWithInvalidatedReallocationWatchpoint, resultGPR, scopeGPR, executable));
+
+ // Fast path: inline allocation; the slow-path generator re-does the work in
+ // the runtime if allocation fails.
+ if (nodeType == NewFunction) {
+ compileNewFunctionCommon<JSFunction>(resultGPR, structure, scratch1GPR, scratch2GPR, scopeGPR, slowPath, JSFunction::allocationSize(0), executable, JSFunction::offsetOfScopeChain(), JSFunction::offsetOfExecutable(), JSFunction::offsetOfRareData());
+
+ addSlowPathGenerator(slowPathCall(slowPath, this, operationNewFunctionWithInvalidatedReallocationWatchpoint, resultGPR, scopeGPR, executable));
+ }
+
+ if (nodeType == NewArrowFunction) {
+ compileNewFunctionCommon<JSArrowFunction>(resultGPR, structure, scratch1GPR, scratch2GPR, scopeGPR, slowPath, JSArrowFunction::allocationSize(0), executable, JSArrowFunction::offsetOfScopeChain(), JSArrowFunction::offsetOfExecutable(), JSArrowFunction::offsetOfRareData());
+#if USE(JSVALUE64)
+ m_jit.storePtr(thisValueGPR, JITCompiler::Address(resultGPR, JSArrowFunction::offsetOfThisValue()));
+ addSlowPathGenerator(slowPathCall(slowPath, this, operationNewArrowFunctionWithInvalidatedReallocationWatchpoint, resultGPR, scopeGPR, executable, thisValueGPR));
+#else
+ m_jit.store32(thisValueTagGPR, MacroAssembler::Address(resultGPR, JSArrowFunction::offsetOfThisValue() + TagOffset));
+ m_jit.store32(thisValuePayloadGPR, MacroAssembler::Address(resultGPR, JSArrowFunction::offsetOfThisValue() + PayloadOffset));
+
+ addSlowPathGenerator(slowPathCall(slowPath, this, operationNewArrowFunctionWithInvalidatedReallocationWatchpoint, resultGPR, scopeGPR, executable, thisValueTagGPR, thisValuePayloadGPR));
+#endif
+ }
cellResult(resultGPR, node);
}
void SpeculativeJIT::compileCreateActivation(Node* node)
{
- SymbolTable* table = m_jit.graph().symbolTableFor(node->origin.semantic);
+ SymbolTable* table = node->castOperand<SymbolTable*>();
Structure* structure = m_jit.graph().globalObjectFor(
node->origin.semantic)->activationStructure();
SpeculateCellOperand scope(this, node->child1());
GPRReg scopeGPR = scope.gpr();
+ JSValue initializationValue = node->initializationValueForActivation();
+ ASSERT(initializationValue == jsUndefined() || initializationValue == jsTDZValue());
if (table->singletonScope()->isStillValid()) {
GPRFlushedCallResult result(this);
flushRegisters();
- callOperation(operationCreateActivationDirect, resultGPR, structure, scopeGPR, table);
+#if USE(JSVALUE64)
+ callOperation(operationCreateActivationDirect,
+ resultGPR, structure, scopeGPR, table, TrustedImm64(JSValue::encode(initializationValue)));
+#else
+ callOperation(operationCreateActivationDirect,
+ resultGPR, structure, scopeGPR, table, TrustedImm32(initializationValue.tag()), TrustedImm32(initializationValue.payload()));
+#endif
+ m_jit.exceptionCheck();
cellResult(resultGPR, node);
return;
}
TrustedImmPtr(table),
JITCompiler::Address(resultGPR, JSLexicalEnvironment::offsetOfSymbolTable()));
- // Must initialize all members to undefined.
+ // Must initialize all members to undefined or the TDZ empty value.
for (unsigned i = 0; i < table->scopeSize(); ++i) {
m_jit.storeTrustedValue(
- jsUndefined(),
+ initializationValue,
JITCompiler::Address(
resultGPR, JSLexicalEnvironment::offsetOfVariable(ScopeOffset(i))));
}
+#if USE(JSVALUE64)
+ addSlowPathGenerator(
+ slowPathCall(
+ slowPath, this, operationCreateActivationDirect, resultGPR, structure, scopeGPR, table, TrustedImm64(JSValue::encode(initializationValue))));
+#else
addSlowPathGenerator(
slowPathCall(
- slowPath, this, operationCreateActivationDirect, resultGPR, structure, scopeGPR, table));
+ slowPath, this, operationCreateActivationDirect, resultGPR, structure, scopeGPR, table, TrustedImm32(initializationValue.tag()), TrustedImm32(initializationValue.payload())));
+#endif
cellResult(resultGPR, node);
}
});
m_jit.setupArgument(0, [&] (GPRReg destGPR) { m_jit.move(GPRInfo::callFrameRegister, destGPR); });
- appendCallWithExceptionCheckSetResult(operationCreateScopedArguments, resultGPR);
+ appendCallSetResult(operationCreateScopedArguments, resultGPR);
+ m_jit.exceptionCheck();
cellResult(resultGPR, node);
}
});
m_jit.setupArgument(0, [&] (GPRReg destGPR) { m_jit.move(GPRInfo::callFrameRegister, destGPR); });
- appendCallWithExceptionCheckSetResult(operationCreateClonedArguments, resultGPR);
+ appendCallSetResult(operationCreateClonedArguments, resultGPR);
+ m_jit.exceptionCheck();
cellResult(resultGPR, node);
}
+// Copies the caller-supplied arguments past numberOfArgumentsToSkip() into the
+// rest-parameter array (child1) via a runtime call. When the precomputed array
+// length (child2) is zero there is nothing to copy and the call is skipped.
+void SpeculativeJIT::compileCopyRest(Node* node)
+{
+ ASSERT(node->op() == CopyRest);
+
+ SpeculateCellOperand array(this, node->child1());
+ GPRTemporary argumentsStart(this);
+ SpeculateStrictInt32Operand arrayLength(this, node->child2());
+
+ GPRReg arrayGPR = array.gpr();
+ GPRReg argumentsStartGPR = argumentsStart.gpr();
+ GPRReg arrayLengthGPR = arrayLength.gpr();
+
+ // Zero-length rest array: skip the runtime call entirely.
+ CCallHelpers::Jump done = m_jit.branch32(MacroAssembler::Equal, arrayLengthGPR, TrustedImm32(0));
+
+ emitGetArgumentStart(node->origin.semantic, argumentsStartGPR);
+ // The call sits mid-block, so registers are silently spilled/filled around it
+ // rather than flushed.
+ silentSpillAllRegisters(argumentsStartGPR);
+ // Arguments: 0:exec, 1:JSCell* array, 2:arguments start, 3:number of arguments to skip, 4:array length
+ callOperation(operationCopyRest, arrayGPR, argumentsStartGPR, Imm32(node->numberOfArgumentsToSkip()), arrayLengthGPR);
+ silentFillAllRegisters(argumentsStartGPR);
+ m_jit.exceptionCheck();
+
+ done.link(&m_jit);
+
+ noResult(node);
+}
+
+// Computes the rest-parameter length: max(0, argumentCount - numberOfArgumentsToSkip()).
+void SpeculativeJIT::compileGetRestLength(Node* node)
+{
+ ASSERT(node->op() == GetRestLength);
+
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ emitGetLength(node->origin.semantic, resultGPR);
+ // If the count does not exceed the skip amount, the rest length is zero.
+ CCallHelpers::Jump hasNonZeroLength = m_jit.branch32(MacroAssembler::Above, resultGPR, Imm32(node->numberOfArgumentsToSkip()));
+ m_jit.move(TrustedImm32(0), resultGPR);
+ CCallHelpers::Jump done = m_jit.jump();
+ hasNonZeroLength.link(&m_jit);
+ if (node->numberOfArgumentsToSkip())
+ m_jit.sub32(TrustedImm32(node->numberOfArgumentsToSkip()), resultGPR);
+ done.link(&m_jit);
+ int32Result(resultGPR, node);
+}
+
void SpeculativeJIT::compileNotifyWrite(Node* node)
{
WatchpointSet* set = node->watchpointSet();
TrustedImm32(IsInvalidated));
addSlowPathGenerator(
- slowPathCall(slowCase, this, operationNotifyWrite, NoResult, set));
+ slowPathCall(slowCase, this, operationNotifyWrite, NoResult, set, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded));
noResult(node);
}
flushRegisters();
GPRFlushedCallResult result(this);
callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
+ m_jit.exceptionCheck();
branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, result.gpr(), taken);
jump(notTaken);
cellResult(resultGPR, node);
}
+// Emits a structure check for the cell in cellGPR against node->structureSet(),
+// OSR-exiting with BadCache on mismatch. tempGPR may be InvalidGPRReg, in which
+// case a temporary is allocated only for the multi-structure case.
+void SpeculativeJIT::compileCheckStructure(Node* node, GPRReg cellGPR, GPRReg tempGPR)
+{
+ ASSERT(node->structureSet().size());
+
+ if (node->structureSet().size() == 1) {
+ // Single structure: compare the structure ID in-place, no temp needed.
+ speculationCheck(
+ BadCache, JSValueSource::unboxedCell(cellGPR), 0,
+ m_jit.branchWeakStructure(
+ JITCompiler::NotEqual,
+ JITCompiler::Address(cellGPR, JSCell::structureIDOffset()),
+ node->structureSet()[0]));
+ } else {
+ std::unique_ptr<GPRTemporary> structure;
+ GPRReg structureGPR;
+
+ if (tempGPR == InvalidGPRReg) {
+ structure = std::make_unique<GPRTemporary>(this);
+ structureGPR = structure->gpr();
+ } else
+ structureGPR = tempGPR;
+
+ m_jit.load32(JITCompiler::Address(cellGPR, JSCell::structureIDOffset()), structureGPR);
+
+ JITCompiler::JumpList done;
+
+ // Any match in the set short-circuits to done; only the last entry is
+ // checked with an exiting branch.
+ for (size_t i = 0; i < node->structureSet().size() - 1; ++i) {
+ done.append(
+ m_jit.branchWeakStructure(JITCompiler::Equal, structureGPR, node->structureSet()[i]));
+ }
+
+ speculationCheck(
+ BadCache, JSValueSource::unboxedCell(cellGPR), 0,
+ m_jit.branchWeakStructure(
+ JITCompiler::NotEqual, structureGPR, node->structureSet().last()));
+
+ done.link(&m_jit);
+ }
+}
+
+// Dispatches CheckStructure on the child's use kind: plain cells go straight to
+// the structure check; CellOrOtherUse lets null/undefined pass through and only
+// checks the structure when the value is actually a cell.
+void SpeculativeJIT::compileCheckStructure(Node* node)
+{
+ switch (node->child1().useKind()) {
+ case CellUse:
+ case KnownCellUse: {
+ SpeculateCellOperand cell(this, node->child1());
+ compileCheckStructure(node, cell.gpr(), InvalidGPRReg);
+ noResult(node);
+ return;
+ }
+
+ case CellOrOtherUse: {
+ JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
+ GPRTemporary temp(this);
+
+ JSValueRegs valueRegs = value.jsValueRegs();
+ GPRReg tempGPR = temp.gpr();
+
+ JITCompiler::Jump cell = m_jit.branchIfCell(valueRegs);
+ // Non-cell path: must be "other" (null/undefined); anything else exits.
+ DFG_TYPE_CHECK(
+ valueRegs, node->child1(), SpecCell | SpecOther,
+ m_jit.branchIfNotOther(valueRegs, tempGPR));
+ JITCompiler::Jump done = m_jit.jump();
+ cell.link(&m_jit);
+ compileCheckStructure(node, valueRegs.payloadGPR(), tempGPR);
+ done.link(&m_jit);
+ noResult(node);
+ return;
+ }
+
+ default:
+ DFG_CRASH(m_jit.graph(), node, "Bad use kind");
+ return;
+ }
+}
+
void SpeculativeJIT::compileAllocatePropertyStorage(Node* node)
{
if (node->transition()->previous->couldHaveIndexingHeader()) {
GPRFlushedCallResult result(this);
callOperation(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity, result.gpr(), baseGPR);
+ m_jit.exceptionCheck();
storageResult(result.gpr(), node);
return;
GPRFlushedCallResult result(this);
callOperation(operationReallocateButterflyToGrowPropertyStorage, result.gpr(), baseGPR, newSize / sizeof(JSValue));
+ m_jit.exceptionCheck();
storageResult(result.gpr(), node);
return;
storageResult(scratchGPR1, node);
}
+// Loads the object's butterfly pointer. For GetButterfly, a pointer that is not
+// in to-space is resolved via the operationGetButterfly slow path; for
+// GetButterflyReadOnly the space bits are simply stripped, since the caller
+// only reads through the pointer.
+void SpeculativeJIT::compileGetButterfly(Node* node)
+{
+ SpeculateCellOperand base(this, node->child1());
+ GPRTemporary result(this, Reuse, base);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
+
+ switch (node->op()) {
+ case GetButterfly:
+ addSlowPathGenerator(
+ slowPathCall(
+ m_jit.branchIfNotToSpace(resultGPR),
+ this, operationGetButterfly, resultGPR, baseGPR));
+ break;
+
+ case GetButterflyReadOnly:
+ m_jit.removeSpaceBits(resultGPR);
+ break;
+
+ default:
+ DFG_CRASH(m_jit.graph(), node, "Bad node type");
+ break;
+ }
+
+ storageResult(resultGPR, node);
+}
+
GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, ArrayMode arrayMode)
{
if (!putByValWillNeedExtraRegister(arrayMode))
GPRReg resultGPR = result.gpr();
m_jit.load32(JITCompiler::Address(op1GPR, JSCell::structureIDOffset()), resultGPR);
- JITCompiler::Jump isString = m_jit.branchStructurePtr(
+ JITCompiler::Jump isString = m_jit.branchStructure(
JITCompiler::Equal,
resultGPR,
m_jit.vm()->stringStructure.get());
ASSERT(node->op() == CallStringConstructor);
callOperation(operationCallStringConstructorOnCell, resultGPR, op1GPR);
}
+ m_jit.exceptionCheck();
if (done.isSet())
done.link(&m_jit);
cellResult(resultGPR, node);
#endif
}
-void SpeculativeJIT::speculateDoubleReal(Edge edge)
+// Speculates that the edge is a "real" bytecode number: either an int32 or a
+// non-NaN double. The value is unboxed without asserting it is a double; if the
+// result compares equal to itself it is a non-NaN double and we are done.
+// Otherwise (NaN — which, presumably, is also what non-double encodings unbox
+// to under NaN boxing; confirm) the value must be an int32 or we exit.
+void SpeculativeJIT::speculateRealNumber(Edge edge)
+{
+ if (!needsTypeCheck(edge, SpecBytecodeRealNumber))
+ return;
+
+ JSValueOperand op1(this, edge, ManualOperandSpeculation);
+ FPRTemporary result(this);
+
+ JSValueRegs op1Regs = op1.jsValueRegs();
+ FPRReg resultFPR = result.fpr();
+
+#if USE(JSVALUE64)
+ GPRTemporary temp(this);
+ GPRReg tempGPR = temp.gpr();
+ m_jit.move(op1Regs.gpr(), tempGPR);
+ m_jit.unboxDoubleWithoutAssertions(tempGPR, resultFPR);
+#else
+ FPRTemporary temp(this);
+ FPRReg tempFPR = temp.fpr();
+ unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
+#endif
+
+ // x == x holds exactly for non-NaN doubles.
+ JITCompiler::Jump done = m_jit.branchDouble(
+ JITCompiler::DoubleEqual, resultFPR, resultFPR);
+
+ typeCheck(op1Regs, edge, SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
+
+ done.link(&m_jit);
+}
+
+void SpeculativeJIT::speculateDoubleRepReal(Edge edge)
{
if (!needsTypeCheck(edge, SpecDoubleReal))
return;
(SpeculateCellOperand(this, edge)).gpr();
}
+// Speculates that the edge is either a cell or "other" (null/undefined),
+// exiting otherwise.
+void SpeculativeJIT::speculateCellOrOther(Edge edge)
+{
+ if (!needsTypeCheck(edge, SpecCell | SpecOther))
+ return;
+
+ JSValueOperand operand(this, edge, ManualOperandSpeculation);
+ GPRTemporary temp(this);
+ GPRReg tempGPR = temp.gpr();
+
+ // Cells pass immediately; non-cells must be "other".
+ MacroAssembler::Jump ok = m_jit.branchIfCell(operand.jsValueRegs());
+ DFG_TYPE_CHECK(
+ operand.jsValueRegs(), edge, SpecCell | SpecOther,
+ m_jit.branchIfNotOther(operand.jsValueRegs(), tempGPR));
+ ok.link(&m_jit);
+}
+
void SpeculativeJIT::speculateObject(Edge edge)
{
if (!needsTypeCheck(edge, SpecObject))
GPRReg structureIDGPR = structureID.gpr();
m_jit.load32(JITCompiler::Address(gpr, JSCell::structureIDOffset()), structureIDGPR);
- JITCompiler::Jump isString = m_jit.branchStructurePtr(
+ JITCompiler::Jump isString = m_jit.branchStructure(
JITCompiler::Equal,
structureIDGPR,
m_jit.vm()->stringStructure.get());
notCell.link(&m_jit);
}
+// Speculates that the already-loaded cell is a Symbol, exiting otherwise.
+void SpeculativeJIT::speculateSymbol(Edge edge, GPRReg cell)
+{
+ DFG_TYPE_CHECK(JSValueSource::unboxedCell(cell), edge, SpecSymbol, m_jit.branchIfNotSymbol(cell));
+}
+
+// Speculates that the edge is a Symbol, loading it as a cell first.
+void SpeculativeJIT::speculateSymbol(Edge edge)
+{
+ if (!needsTypeCheck(edge, SpecSymbol))
+ return;
+
+ SpeculateCellOperand operand(this, edge);
+ speculateSymbol(edge, operand.gpr());
+}
+
void SpeculativeJIT::speculateNotCell(Edge edge)
{
if (!needsTypeCheck(edge, ~SpecCell))
case KnownStringUse:
ASSERT(!needsTypeCheck(edge, SpecString));
break;
+ case KnownPrimitiveUse:
+ ASSERT(!needsTypeCheck(edge, SpecHeapTop & ~SpecObject));
+ break;
case Int32Use:
speculateInt32(edge);
break;
case NumberUse:
speculateNumber(edge);
break;
+ case RealNumberUse:
+ speculateRealNumber(edge);
+ break;
case DoubleRepRealUse:
- speculateDoubleReal(edge);
+ speculateDoubleRepReal(edge);
break;
#if USE(JSVALUE64)
case MachineIntUse:
case BooleanUse:
speculateBoolean(edge);
break;
+ case KnownBooleanUse:
+ ASSERT(!needsTypeCheck(edge, SpecBoolean));
+ break;
case CellUse:
speculateCell(edge);
break;
+ case CellOrOtherUse:
+ speculateCellOrOther(edge);
+ break;
case ObjectUse:
speculateObject(edge);
break;
case StringUse:
speculateString(edge);
break;
+ case SymbolUse:
+ speculateSymbol(edge);
+ break;
case StringObjectUse:
speculateStringObject(edge);
break;
silentSpillAllRegisters(scratch);
callOperation(operationFindSwitchImmTargetForDouble, scratch, valueRegs, data->switchTableIndex);
silentFillAllRegisters(scratch);
+
m_jit.jump(scratch);
#endif
noResult(node, UseChildrenCalledExplicitly);
flushRegisters();
callOperation(
operationSwitchString, string, data->switchTableIndex, string);
+ m_jit.exceptionCheck();
m_jit.jump(string);
return;
}
silentSpillAllRegisters(string);
callOperation(operationSwitchString, string, data->switchTableIndex, string);
silentFillAllRegisters(string);
+ m_jit.exceptionCheck();
m_jit.jump(string);
}
}
}
-#if ENABLE(GGC)
void SpeculativeJIT::compileStoreBarrier(Node* node)
{
ASSERT(node->op() == StoreBarrier);
void SpeculativeJIT::storeToWriteBarrierBuffer(GPRReg cell, GPRReg scratch1, GPRReg scratch2)
{
ASSERT(scratch1 != scratch2);
- WriteBarrierBuffer* writeBarrierBuffer = &m_jit.vm()->heap.m_writeBarrierBuffer;
- m_jit.move(TrustedImmPtr(writeBarrierBuffer), scratch1);
- m_jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2);
- JITCompiler::Jump needToFlush = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset()));
+ WriteBarrierBuffer& writeBarrierBuffer = m_jit.vm()->heap.m_writeBarrierBuffer;
+ m_jit.load32(writeBarrierBuffer.currentIndexAddress(), scratch2);
+ JITCompiler::Jump needToFlush = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::TrustedImm32(writeBarrierBuffer.capacity()));
m_jit.add32(TrustedImm32(1), scratch2);
- m_jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()));
+ m_jit.store32(scratch2, writeBarrierBuffer.currentIndexAddress());
- m_jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1);
+ m_jit.move(TrustedImmPtr(writeBarrierBuffer.buffer()), scratch1);
// We use an offset of -sizeof(void*) because we already added 1 to scratch2.
m_jit.storePtr(cell, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));
done.link(&m_jit);
}
-void SpeculativeJIT::storeToWriteBarrierBuffer(JSCell* cell, GPRReg scratch1, GPRReg scratch2)
+// Emits the generational write barrier for ownerGPR: if the owner is not
+// already remembered (or is in eden), record it in the write barrier buffer.
+void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg scratch1, GPRReg scratch2)
{
- ASSERT(scratch1 != scratch2);
- WriteBarrierBuffer* writeBarrierBuffer = &m_jit.vm()->heap.m_writeBarrierBuffer;
- m_jit.move(TrustedImmPtr(writeBarrierBuffer), scratch1);
- m_jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2);
- JITCompiler::Jump needToFlush = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset()));
-
- m_jit.add32(TrustedImm32(1), scratch2);
- m_jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()));
+ JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR);
+ storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
+ ownerIsRememberedOrInEden.link(&m_jit);
+}
- m_jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1);
- // We use an offset of -sizeof(void*) because we already added 1 to scratch2.
- m_jit.storePtr(TrustedImmPtr(cell), MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));
+// Installs a getter or setter (chosen by the node op) on the base object under
+// a known identifier, entirely via a runtime call.
+void SpeculativeJIT::compilePutAccessorById(Node* node)
+{
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateCellOperand accessor(this, node->child2());
- JITCompiler::Jump done = m_jit.jump();
- needToFlush.link(&m_jit);
+ GPRReg baseGPR = base.gpr();
+ GPRReg accessorGPR = accessor.gpr();
- // Call C slow path
- silentSpillAllRegisters(InvalidGPRReg);
- callOperation(operationFlushWriteBarrierBuffer, cell);
- silentFillAllRegisters(InvalidGPRReg);
+ flushRegisters();
+ callOperation(node->op() == PutGetterById ? operationPutGetterById : operationPutSetterById, NoResult, baseGPR, identifierUID(node->identifierNumber()), node->accessorAttributes(), accessorGPR);
+ m_jit.exceptionCheck();
- done.link(&m_jit);
+ noResult(node);
}
-void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, GPRReg scratch1, GPRReg scratch2)
+// Installs a getter/setter pair on the base object under a known identifier,
+// entirely via a runtime call.
+void SpeculativeJIT::compilePutGetterSetterById(Node* node)
{
- if (Heap::isMarked(value))
- return;
+ SpeculateCellOperand base(this, node->child1());
+ JSValueOperand getter(this, node->child2());
+ JSValueOperand setter(this, node->child3());
- JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR);
- storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
- ownerIsRememberedOrInEden.link(&m_jit);
+#if USE(JSVALUE64)
+ GPRReg baseGPR = base.gpr();
+ GPRReg getterGPR = getter.gpr();
+ GPRReg setterGPR = setter.gpr();
+
+ flushRegisters();
+ callOperation(operationPutGetterSetter, NoResult, baseGPR, identifierUID(node->identifierNumber()), node->accessorAttributes(), getterGPR, setterGPR);
+#else
+ // Each of these JSValues is either JSUndefined or a JSFunction*:
+ // 1. If the JSValue is JSUndefined, its payload is nullptr.
+ // 2. If the JSValue is a JSFunction*, its payload is that JSFunction*.
+ // So we extract just the payload and pass it to operationPutGetterSetter.
+ // The baseline JIT uses the same trick.
+ GPRReg baseGPR = base.gpr();
+ JSValueRegs getterRegs = getter.jsValueRegs();
+ JSValueRegs setterRegs = setter.jsValueRegs();
+
+ flushRegisters();
+ callOperation(operationPutGetterSetter, NoResult, baseGPR, identifierUID(node->identifierNumber()), node->accessorAttributes(), getterRegs.payloadGPR(), setterRegs.payloadGPR());
+#endif
+ m_jit.exceptionCheck();
+
+ noResult(node);
}
-void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg scratch1, GPRReg scratch2)
+// Installs a getter or setter (chosen by the node op) on the base object under
+// a dynamic subscript, entirely via a runtime call.
+void SpeculativeJIT::compilePutAccessorByVal(Node* node)
{
- JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR);
- storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
- ownerIsRememberedOrInEden.link(&m_jit);
-}
+ SpeculateCellOperand base(this, node->child1());
+ JSValueOperand subscript(this, node->child2());
+ SpeculateCellOperand accessor(this, node->child3());
+
+ auto operation = node->op() == PutGetterByVal ? operationPutGetterByVal : operationPutSetterByVal;
+#if USE(JSVALUE64)
+ GPRReg baseGPR = base.gpr();
+ GPRReg subscriptGPR = subscript.gpr();
+ GPRReg accessorGPR = accessor.gpr();
+
+ flushRegisters();
+ callOperation(operation, NoResult, baseGPR, subscriptGPR, node->accessorAttributes(), accessorGPR);
#else
-void SpeculativeJIT::compileStoreBarrier(Node* node)
-{
- DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate);
+ GPRReg baseGPR = base.gpr();
+ JSValueRegs subscriptRegs = subscript.jsValueRegs();
+ GPRReg accessorGPR = accessor.gpr();
+
+ flushRegisters();
+ callOperation(operation, NoResult, baseGPR, subscriptRegs.tagGPR(), subscriptRegs.payloadGPR(), node->accessorAttributes(), accessorGPR);
+#endif
+ m_jit.exceptionCheck();
+
noResult(node);
}
-#endif // ENABLE(GGC)
} } // namespace JSC::DFG