+2016-11-11 Chris Dumez <cdumez@apple.com>
+
+ Unreviewed, rolling out r208117 and r208160.
+
+ Regressed Speedometer by >1.5%
+
+ Reverted changesets:
+
+ "We should have a way of profiling when a get_by_id is pure
+ and to emit a PureGetById in the DFG/FTL"
+ https://bugs.webkit.org/show_bug.cgi?id=163305
+ http://trac.webkit.org/changeset/208117
+
+ "Debug JSC test microbenchmarks/pure-get-by-id-cse-2.js timing
+ out"
+ https://bugs.webkit.org/show_bug.cgi?id=164227
+ http://trac.webkit.org/changeset/208160
+
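    Roughly, the reverted change profiled each get_by_id and treated it as
    "pure" only when the lookup resolved to a plain value or a clean miss
    without consulting an opaque object; a JS getter or a Proxy trap flipped
    didSideEffects on the baseline StructureStubInfo (see the removed
    pureGetByIdCommon and operationGetById below) and forced the usual
    world-clobbering GetById. A minimal JS sketch of the distinction the
    profiling drew (illustrative only, not part of the patch):

        // Pure: "f" is a plain data property, so resolving it has no
        // observable side effects and the access could stay a PureGetById.
        let plain = { f: 20 };
        plain.f;

        // Not pure: the access runs arbitrary JS, so the removed profiling
        // set didSideEffects on the stub and the DFG/FTL fell back to a
        // normal GetById (or jettisoned already-optimized code).
        let effectful = {};
        Object.defineProperty(effectful, "f", { get() { return 20; } });
        effectful.f;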
2016-11-11 Saam Barati <sbarati@apple.com>

 We should have a more concise way of determining when we're varargs calling a function using rest parameters
+++ /dev/null
-function foo(o, c) {
- if (o.f) {
- let sum = 0;
- for (let i = 0; i < c; i++)
- sum += o.f;
- return sum;
- }
-}
-noInline(foo);
-
-let start = Date.now();
-let objects = [];
-const objectCount = 20;
-for (let i = 0; i < objectCount; i++) {
- let obj = {};
- for (let j = 0; j < i * 2; j++) {
- obj["j" + j] = j;
- }
- obj.f = 20;
- objects.push(obj);
-}
-
-for (let i = 0; i < 10000; i++) {
- let obj = objects[i % objects.length];
- foo(obj, 25);
-}
-
-const verbose = false;
-if (verbose)
- print(Date.now() - start);
+++ /dev/null
-function foo(o) {
- if (o.f)
- return o.f + o.f + o.f + o.f;
-}
-noInline(foo);
-
-let start = Date.now();
-let objects = [];
-const objectCount = 20;
-for (let i = 0; i < objectCount; i++) {
- let obj = {};
- for (let j = 0; j < i * 2; j++) {
- obj["j" + j] = j;
- }
- obj.f = 20;
- objects.push(obj);
-}
-
-for (let i = 0; i < 10000000; i++) {
- let obj = objects[i % objects.length];
- foo(obj);
-}
-
-const verbose = false;
-if (verbose)
- print(Date.now() - start);
+++ /dev/null
-function assert(b) {
- if (!b)
- throw new Error("Bad assertion")
-}
-noInline(assert);
-
-function foo(o1, o2) {
- let a = o1.f1;
- let b = o2.f2;
- return a + o1.f1 + b;
-}
-noInline(foo);
-
-let objs = [];
-const count = 80;
-for (let i = 0; i < count; i++) {
- let o = {};
- for (let j = 0; j < i; ++j) {
- o[j + "J"] = j;
- }
- o.f1 = 20;
- o.f2 = 40;
- objs.push(o);
-}
-
-for (let i = 0; i < 1000; i++) {
- let o1 = objs[i % objs.length];
- let o2 = objs[(i + 1) % objs.length];
- assert(foo(o1, o2) === 80);
-}
-
-let o = objs[count - 1];
-let numCalls = 0;
-Object.defineProperty(o, "f1", {
- get() { ++numCalls; return 25; }
-});
-
-assert(foo(o, objs[count - 2]) === 90);
-assert(numCalls === 2);
+++ /dev/null
-function assert(b) {
- if (!b)
- throw new Error("Bad assertion.")
-}
-
-function foo(o) {
- assert(o.length === o.length);
- return o.length;
-}
-noInline(foo);
-
-let items = [];
-const numItems = 30;
-for (let i = 0; i < numItems; i++) {
- let o = {};
- for (let j = 0; j < i; j++) {
- o[j + "j"] = j;
- }
- o.length = 2;
- items.push(o);
-}
-
-items.push("st");
-
-for (let i = 0; i < 10000; i++)
- assert(foo(items[i % items.length]) === 2);
-
-Number.prototype.length = 2;
-items.push(42);
-
-for (let i = 0; i < 100000; i++)
- assert(foo(items[i % items.length]) === 2);
+2016-11-11 Chris Dumez <cdumez@apple.com>
+
+ Unreviewed, rolling out r208117 and r208160.
+
+ Regressed Speedometer by >1.5%
+
+ Reverted changesets:
+
+ "We should have a way of profiling when a get_by_id is pure
+ and to emit a PureGetById in the DFG/FTL"
+ https://bugs.webkit.org/show_bug.cgi?id=163305
+ http://trac.webkit.org/changeset/208117
+
+ "Debug JSC test microbenchmarks/pure-get-by-id-cse-2.js timing
+ out"
+ https://bugs.webkit.org/show_bug.cgi?id=164227
+ http://trac.webkit.org/changeset/208160
+
2016-11-11 Saam Barati <sbarati@apple.com>

 We should have a more concise way of determining when we're varargs calling a function using rest parameters
, resetByGC(false)
, tookSlowPath(false)
, everConsidered(false)
- , didSideEffects(false)
{
}
}
switch (accessType) {
- case AccessType::TryGet:
- resetGetByID(codeBlock, *this, GetByIDKind::Try);
- break;
- case AccessType::PureGet:
+ case AccessType::GetPure:
resetGetByID(codeBlock, *this, GetByIDKind::Pure);
break;
case AccessType::Get:
enum class AccessType : int8_t {
Get,
- TryGet,
- PureGet,
+ GetPure,
Put,
In
};
bool resetByGC : 1;
bool tookSlowPath : 1;
bool everConsidered : 1;
- bool didSideEffects : 1;
};
inline CodeOrigin getStructureStubInfoCodeOrigin(StructureStubInfo& structureStubInfo)
forNode(node).makeHeapTop();
break;
- case PureGetById:
case GetById:
case GetByIdFlush: {
if (!node->prediction()) {
}
}
- if (node->op() == PureGetById)
- clobberStructures(clobberLimit);
- else
- clobberWorld(node->origin.semantic, clobberLimit);
+ clobberWorld(node->origin.semantic, clobberLimit);
forNode(node).makeHeapTop();
break;
}
static bool canBecomeGetArrayLength(Graph& graph, Node* node)
{
- if (node->op() == GetById || node->op() == PureGetById) {
- auto uid = graph.identifiers()[node->identifierNumber()];
- return uid == graph.m_vm.propertyNames->length.impl();
- }
- return false;
+ if (node->op() != GetById)
+ return false;
+ auto uid = graph.identifiers()[node->identifierNumber()];
+ return uid == graph.m_vm.propertyNames->length.impl();
}
ArrayMode ArrayMode::refine(
}
NodeType getById;
- switch (type) {
- case AccessType::Get:
+ if (type == AccessType::Get)
getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;
- break;
- case AccessType::TryGet:
+ else
getById = TryGetById;
- break;
- case AccessType::PureGet:
- getById = PureGetById;
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
// Special path for custom accessors since custom's offset does not have any meanings.
// So, this is completely different from Simple one. But we have a chance to optimize it when we use DOMJIT.
Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
- bool compileAsGetById = false;
- bool compileAsPureGetById = false;
+ bool compiledAsGetById = false;
GetByIdStatus getByIdStatus;
unsigned identifierNumber = 0;
{
// FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null.
// At that time, there is no information.
if (byValInfo && byValInfo->stubInfo && !byValInfo->tookSlowPath && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent) && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
- compileAsGetById = true;
- compileAsPureGetById = !byValInfo->stubInfo->didSideEffects;
+ compiledAsGetById = true;
identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl());
UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
}
}
- if (compileAsGetById) {
- AccessType type = compileAsPureGetById ? AccessType::PureGet : AccessType::Get;
- handleGetById(currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus, type, OPCODE_LENGTH(op_get_by_val));
- } else {
+ if (compiledAsGetById)
+ handleGetById(currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, OPCODE_LENGTH(op_get_by_val));
+ else {
ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read);
Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
m_exitOK = false; // GetByVal must be treated as if it clobbers exit state, since FixupPhase may make it generic.
m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
currentCodeOrigin(), uid);
- AccessType type;
- if (opcodeID == op_try_get_by_id)
- type = AccessType::TryGet;
- else {
- ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
- unsigned bytecodeIndex = currentCodeOrigin().bytecodeIndex;
- StructureStubInfo* info = m_inlineStackTop->m_stubInfos.get(CodeOrigin(bytecodeIndex));
- if (info && info->everConsidered && !info->didSideEffects)
- type = AccessType::PureGet;
- else
- type = AccessType::Get;
- }
+ AccessType type = op_try_get_by_id == opcodeID ? AccessType::GetPure : AccessType::Get;
unsigned opcodeLength = opcodeID == op_try_get_by_id ? OPCODE_LENGTH(op_try_get_by_id) : OPCODE_LENGTH(op_get_by_id);
def(HeapLocation(IsFunctionLoc, MiscFields, node->child1()), LazyNode(node));
return;
- case PureGetById: {
- // We model what is allowed inside a getOwnPropertySlot(VMInquiry) here.
- // Some getOwnPropertySlot implementations will lazily inject properties, which
- // may change the object's structure.
-
- read(JSCell_structureID);
- read(JSCell_typeInfoFlags);
- read(JSCell_typeInfoType);
- read(JSCell_indexingType);
- read(JSObject_butterfly);
- read(MiscFields);
-
- AbstractHeap propertyNameHeap(NamedProperties, node->identifierNumber());
- read(propertyNameHeap);
-
- write(JSCell_structureID);
- write(JSCell_typeInfoFlags);
-
- write(Watchpoint_fire);
- write(MiscFields);
-
- // This can happen if lazily adding fields to an object happens in getOwnPropertySlot
- // and we need to allocate out of line storage.
- write(JSObject_butterfly);
-
- def(HeapLocation(NamedPropertyLoc, propertyNameHeap, node->child1()), LazyNode(node));
- return;
- }
-
case GetById:
case GetByIdFlush:
case GetByIdWithThis:
break;
}
- case PureGetById:
case GetById:
case GetByIdFlush: {
Edge childEdge = node->child1();
m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
alreadyHandled = true; // Don't allow the default constant folder to do things to this.
- if (!Options::useAccessInlining())
- break;
-
if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered()
|| (node->child1().useKind() == UntypedUse || (baseValue.m_type & ~SpecCell)))
break;
m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
alreadyHandled = true; // Don't allow the default constant folder to do things to this.
- if (!Options::useAccessInlining())
- break;
-
if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered())
break;
case ResolveScope:
return false;
- case PureGetById: // We are modeling getOwnPropertySlot here, which may GC because it is allowed to allocate things.
case CreateActivation:
case CreateDirectArguments:
case CreateScopedArguments:
break;
}
- case PureGetById:
case GetById:
case GetByIdFlush: {
// FIXME: This should be done in the ByteCodeParser based on reading the
void convertToGetByOffset(StorageAccessData& data, Edge storage, Edge base)
{
- ASSERT(m_op == GetById || m_op == GetByIdFlush || m_op == PureGetById || m_op == MultiGetByOffset);
+ ASSERT(m_op == GetById || m_op == GetByIdFlush || m_op == MultiGetByOffset);
m_opInfo = &data;
children.setChild1(storage);
children.setChild2(base);
void convertToMultiGetByOffset(MultiGetByOffsetData* data)
{
- ASSERT(m_op == GetById || m_op == GetByIdFlush || m_op == PureGetById);
+ ASSERT(m_op == GetById || m_op == GetByIdFlush);
m_opInfo = data;
child1().setUseKind(CellUse);
m_op = MultiGetByOffset;
switch (op()) {
case TryGetById:
case GetById:
- case PureGetById:
case GetByIdFlush:
case GetByIdWithThis:
case PutById:
case ArithCeil:
case ArithTrunc:
case GetDirectPname:
- case PureGetById:
case GetById:
case GetByIdFlush:
case GetByIdWithThis:
macro(PutByVal, NodeMustGenerate | NodeHasVarArgs) \
macro(PutByValAlias, NodeMustGenerate | NodeHasVarArgs) \
macro(TryGetById, NodeResultJS) \
- macro(PureGetById, NodeResultJS | NodeMustGenerate) \
macro(GetById, NodeResultJS | NodeMustGenerate) \
macro(GetByIdFlush, NodeResultJS | NodeMustGenerate) \
macro(GetByIdWithThis, NodeResultJS | NodeMustGenerate) \
case RegExpTest:
case StringReplace:
case StringReplaceRegExp:
- case PureGetById:
case GetById:
case GetByIdFlush:
case GetByIdWithThis:
case TryGetById:
case DeleteById:
case DeleteByVal:
- case PureGetById:
case GetById:
case GetByIdWithThis:
case GetByValWithThis:
base.use();
- cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::TryGet);
+ cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::GetPure);
jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
break;
JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
- cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::TryGet);
+ cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::GetPure);
jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
break;
}
}
-void SpeculativeJIT::compilePureGetById(Node* node)
-{
- ASSERT(node->op() == PureGetById);
-
- switch (node->child1().useKind()) {
- case CellUse: {
- SpeculateCellOperand base(this, node->child1());
- JSValueRegsTemporary result(this, Reuse, base);
-
- JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
- JSValueRegs resultRegs = result.regs();
-
- cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), JITCompiler::Jump(), NeedToSpill, AccessType::PureGet);
-
- jsValueResult(resultRegs, node);
- break;
- }
- case UntypedUse: {
- JSValueOperand base(this, node->child1());
- JSValueRegsTemporary result(this, Reuse, base);
-
- JSValueRegs baseRegs = base.jsValueRegs();
- JSValueRegs resultRegs = result.regs();
-
- JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);
-
- cachedGetById(node->origin.semantic, baseRegs, resultRegs, node->identifierNumber(), notCell, NeedToSpill, AccessType::PureGet);
-
- jsValueResult(resultRegs, node);
- break;
- }
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-}
-
void SpeculativeJIT::compileIn(Node* node)
{
SpeculateCellOperand base(this, node->child2());
void compileDeleteById(Node*);
void compileDeleteByVal(Node*);
void compileTryGetById(Node*);
- void compilePureGetById(Node*);
void compileIn(Node*);
void nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand);
J_JITOperation_ESsiJI getByIdFunction;
if (type == AccessType::Get)
getByIdFunction = operationGetByIdOptimize;
- else if (type == AccessType::PureGet)
- getByIdFunction = operationPureGetByIdOptimize;
else
getByIdFunction = operationTryGetByIdOptimize;
break;
}
- case PureGetById: {
- compilePureGetById(node);
- break;
- }
-
case GetByIdWithThis: {
JSValueOperand base(this, node->child1());
JSValueRegs baseRegs = base.jsValueRegs();
slowCases.append(slowPathTarget);
slowCases.append(gen.slowPathJump());
- auto slowPathFunction = type == AccessType::Get ? operationGetByIdOptimize :
- type == AccessType::PureGet ? operationPureGetByIdOptimize : operationTryGetByIdOptimize;
-
auto slowPath = slowPathCall(
- slowCases, this, slowPathFunction,
+ slowCases, this, type == AccessType::Get ? operationGetByIdOptimize : operationTryGetByIdOptimize,
spillMode, ExceptionCheckRequirement::CheckNeeded,
resultGPR, gen.stubInfo(), baseGPR, identifierUID(identifierNumber));
break;
}
- case PureGetById: {
- compilePureGetById(node);
- break;
- }
-
case GetByIdFlush: {
if (!node->prediction()) {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
case NewArrayWithSize:
case TryGetById:
case GetById:
- case PureGetById:
case GetByIdFlush:
case GetByIdWithThis:
case ToThis:
compilePutStructure();
break;
case TryGetById:
- compileGetById(AccessType::TryGet);
- break;
- case PureGetById:
- compileGetById(AccessType::PureGet);
+ compileGetById(AccessType::GetPure);
break;
case GetById:
case GetByIdFlush:
void compileGetById(AccessType type)
{
- ASSERT(type == AccessType::Get || type == AccessType::TryGet || type == AccessType::PureGet);
+ ASSERT(type == AccessType::Get || type == AccessType::GetPure);
switch (m_node->child1().useKind()) {
case CellUse: {
setJSValue(getById(lowCell(m_node->child1()), type));
J_JITOperation_EJI getByIdFunction;
if (type == AccessType::Get)
getByIdFunction = operationGetByIdGeneric;
- else if (type == AccessType::PureGet)
- getByIdFunction = operationPureGetByIdGeneric;
else
getByIdFunction = operationTryGetByIdGeneric;
J_JITOperation_ESsiJI optimizationFunction;
if (type == AccessType::Get)
optimizationFunction = operationGetByIdOptimize;
- else if (type == AccessType::PureGet)
- optimizationFunction = operationPureGetByIdOptimize;
else
optimizationFunction = operationTryGetByIdOptimize;
namespace JSC {
-ALWAYS_INLINE static EncodedJSValue pureGetByIdCommon(VM& vm, ExecState* exec, EncodedJSValue base, UniquedStringImpl* uid, const std::function<void (const PropertySlot&, const Identifier&)>& function = [] (const PropertySlot&, const Identifier&) { })
-{
- Identifier ident = Identifier::fromUid(&vm, uid);
- JSValue baseValue = JSValue::decode(base);
-
- ASSERT(JITCode::isOptimizingJIT(exec->codeBlock()->jitType()));
-
- PropertySlot slot(baseValue, PropertySlot::InternalMethodType::VMInquiry);
- return JSValue::encode(baseValue.getPropertySlot(exec, ident, slot, [&] (bool, PropertySlot&) -> JSValue {
- bool willDoSideEffects = !(slot.isValue() || slot.isUnset()) || slot.isTaintedByOpaqueObject();
- if (UNLIKELY(willDoSideEffects)) {
- {
- CodeOrigin codeOrigin = exec->codeOrigin();
- CodeBlock* currentBaseline = baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, exec->codeBlock()->alternative());
- CodeOrigin originBytecodeIndex = CodeOrigin(codeOrigin.bytecodeIndex); // Since we're searching in the baseline, we just care about bytecode index.
- ConcurrentJITLocker locker(currentBaseline->m_lock);
- if (StructureStubInfo* stub = currentBaseline->findStubInfo(originBytecodeIndex))
- stub->didSideEffects = true;
- }
-
- exec->codeBlock()->jettison(Profiler::JettisonDueToPureGetByIdEffects);
- return baseValue.get(exec, uid);
- }
-
- function(slot, ident);
- return slot.isValue() ? slot.getValue(exec, ident) : jsUndefined();
- }));
-}
-
extern "C" {
#if COMPILER(MSVC)
return missingArgCount;
}
-EncodedJSValue JIT_OPERATION operationPureGetByIdGeneric(ExecState* exec, EncodedJSValue base, UniquedStringImpl* uid)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
-
- return pureGetByIdCommon(*vm, exec, base, uid);
-}
-
-EncodedJSValue JIT_OPERATION operationPureGetById(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
-
- stubInfo->tookSlowPath = true;
-
- return pureGetByIdCommon(*vm, exec, base, uid);
-}
-
-EncodedJSValue JIT_OPERATION operationPureGetByIdOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
-
- return pureGetByIdCommon(*vm, exec, base, uid,
- [&] (const PropertySlot& slot, const Identifier& ident) {
- ASSERT((slot.isValue() || slot.isUnset()) && !slot.isTaintedByOpaqueObject());
- JSValue baseValue = JSValue::decode(base);
- if (stubInfo->considerCaching(baseValue.structureOrNull()))
- repatchGetByID(exec, baseValue, ident, slot, *stubInfo, GetByIDKind::Pure);
- });
-}
-
EncodedJSValue JIT_OPERATION operationTryGetById(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid)
{
VM* vm = &exec->vm();
return JSValue::encode(slot.getPureResult());
}
+
EncodedJSValue JIT_OPERATION operationTryGetByIdGeneric(ExecState* exec, EncodedJSValue base, UniquedStringImpl* uid)
{
VM* vm = &exec->vm();
RETURN_IF_EXCEPTION(scope, encodedJSValue());
if (stubInfo->considerCaching(baseValue.structureOrNull()) && !slot.isTaintedByOpaqueObject() && (slot.isCacheableValue() || slot.isCacheableGetter() || slot.isUnset()))
- repatchGetByID(exec, baseValue, ident, slot, *stubInfo, GetByIDKind::Try);
+ repatchGetByID(exec, baseValue, ident, slot, *stubInfo, GetByIDKind::Pure);
return JSValue::encode(slot.getPureResult());
}
Identifier ident = Identifier::fromUid(vm, uid);
LOG_IC((ICEvent::OperationGetById, baseValue.classInfoOrNull(), ident));
- JSValue result = baseValue.get(exec, ident, slot);
- bool willDoSideEffects = !(slot.isValue() || slot.isUnset()) || slot.isTaintedByOpaqueObject();
- stubInfo->didSideEffects |= willDoSideEffects;
- return JSValue::encode(result);
+ return JSValue::encode(baseValue.get(exec, ident, slot));
}
EncodedJSValue JIT_OPERATION operationGetByIdGeneric(ExecState* exec, EncodedJSValue base, UniquedStringImpl* uid)
LOG_IC((ICEvent::OperationGetByIdOptimize, baseValue.classInfoOrNull(), ident));
return JSValue::encode(baseValue.getPropertySlot(exec, ident, [&] (bool found, PropertySlot& slot) -> JSValue {
- bool willDoSideEffects = !(slot.isValue() || slot.isUnset()) || slot.isTaintedByOpaqueObject();
- stubInfo->didSideEffects |= willDoSideEffects;
-
if (stubInfo->considerCaching(baseValue.structureOrNull()))
repatchGetByID(exec, baseValue, ident, slot, *stubInfo, GetByIDKind::Normal);
return found ? slot.getValue(exec, ident) : jsUndefined();
EncodedJSValue JIT_OPERATION operationTryGetById(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationTryGetByIdGeneric(ExecState*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationTryGetByIdOptimize(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationPureGetById(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationPureGetByIdGeneric(ExecState*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationPureGetByIdOptimize(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationGetById(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationGetByIdGeneric(ExecState*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
JITGetByIdGenerator gen(
m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
- ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::TryGet);
+ ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::GetPure);
gen.generateFastPath(*this);
addSlowCase(gen.slowPathJump());
m_getByIds.append(gen);
JITGetByIdGenerator gen(
m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
- ident->impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::TryGet);
+ ident->impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::GetPure);
gen.generateFastPath(*this);
addSlowCase(gen.slowPathJump());
m_getByIds.append(gen);
inline J_JITOperation_ESsiJI appropriateOptimizingGetByIdFunction(GetByIDKind kind)
{
- switch (kind) {
- case GetByIDKind::Normal:
+ if (kind == GetByIDKind::Normal)
return operationGetByIdOptimize;
- case GetByIDKind::Try:
- return operationTryGetByIdOptimize;
- case GetByIDKind::Pure:
- return operationPureGetByIdOptimize;
- default:
- break;
- }
- ASSERT_NOT_REACHED();
- return operationGetByIdOptimize;
+ return operationTryGetByIdOptimize;
}
inline J_JITOperation_ESsiJI appropriateGenericGetByIdFunction(GetByIDKind kind)
{
- switch (kind) {
- case GetByIDKind::Normal:
+ if (kind == GetByIDKind::Normal)
return operationGetById;
- case GetByIDKind::Try:
- return operationTryGetById;
- case GetByIDKind::Pure:
- return operationPureGetById;
- default:
- break;
- }
- ASSERT_NOT_REACHED();
- return operationGetById;
+ return operationTryGetById;
}
static InlineCacheAction tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo, GetByIDKind kind)
type = AccessCase::Load;
else if (slot.isUnset())
type = AccessCase::Miss;
- else
- RELEASE_ASSERT_NOT_REACHED();
-
- newCase = AccessCase::tryGet(vm, codeBlock, type, offset, structure, conditionSet, loadTargetFromProxy, slot.watchpointSet());
- } else if (kind == GetByIDKind::Try) {
- AccessCase::AccessType type;
- if (slot.isCacheableValue())
- type = AccessCase::Load;
- else if (slot.isUnset())
- type = AccessCase::Miss;
else if (slot.isCacheableGetter())
type = AccessCase::GetGetter;
else
enum class GetByIDKind {
Normal,
- Try,
Pure
};
case JettisonDueToOSRExit:
out.print("OSRExit");
return;
- case JettisonDueToPureGetByIdEffects:
- out.print("PureGetByIdEffects");
- return;
case JettisonDueToProfiledWatchpoint:
out.print("ProfiledWatchpoint");
return;
JettisonDueToBaselineLoopReoptimizationTrigger,
JettisonDueToBaselineLoopReoptimizationTriggerOnOSREntryFail,
JettisonDueToOSRExit,
- JettisonDueToPureGetByIdEffects,
JettisonDueToProfiledWatchpoint,
JettisonDueToUnprofiledWatchpoint,
JettisonDueToOldAge