+2017-02-26 Caio Lima <ticaiolima@gmail.com>
+
+ op_get_by_id_with_this should use inline caching
+ https://bugs.webkit.org/show_bug.cgi?id=162124
+
+ Reviewed by Saam Barati.
+
+ * microbenchmarks/super-getter.js: Added.
+ (A.prototype.get f):
+ (A):
+ (B.prototype.get f):
+ (B):
+ * stress/super-force-ic-fail.js: Added.
+ (let.assert):
+ (let.aObj.get foo):
+ (let.obj.jaz):
+ (let.bObj.get foo):
+ (let.obj2.foo):
+ * stress/super-get-by-id.js: Added.
+ (assert):
+ (Base):
+ (Base.prototype.get name):
+ (Base.prototype.set name):
+ (Subclass.prototype.get name):
+ (Subclass):
+ (getterName):
+ (getterValue):
+ (PolymorphicSubclass.prototype.get value):
+ (PolymorphicSubclass):
+ (i.let.BaseCode):
+ (i.get value):
+ (MegamorphicSubclass.prototype.get value):
+ (MegamorphicSubclass):
+ (let.subObj.get value):
+ (i.catch):
+ (subObj.get value):
+ (BaseException):
+ (BaseException.prototype.get name):
+ (SubclassException.prototype.get name):
+ (SubclassException):
+ (prototype.foo):
+ (prototype.get name):
+ (SubclassExceptionComplex.prototype.get name):
+ (SubclassExceptionComplex):
+ * stress/super-getter-reset-ic.js: Added.
+ (let.assert):
+ (let.B.f):
+
2017-02-24 JF Bastien <jfbastien@apple.com>
WebAssembly: miscellaneous spec fixes
--- /dev/null
+class A {
+ get f() {
+ return this._f;
+ }
+}
+
+class B extends A {
+ get f() {
+ return super.f;
+ }
+}
+
+(function() {
+ var o = new B();
+
+ o._f = 42;
+ var result = 0;
+ var n = 2000000;
+ for (var i = 0; i < n; ++i)
+ result += o.f;
+ if (result != n * 42)
+ throw "Error: bad result: " + result;
+})();
+
--- /dev/null
+let assert = (a) => {
+ if (!a)
+ throw Error("Bad Assertion");
+}
+
+let aObj = {
+ get foo() { return this.a; }
+};
+
+let obj = {
+ jaz() {
+ return super.foo;
+ }
+};
+obj.a = "foo";
+
+Object.setPrototypeOf(obj, aObj);
+
+noInline(obj.jaz);
+
+for (let i = 0; i < 10000; i++) {
+ if (i == 9999) {
+ delete aObj.foo;
+ assert(obj.jaz() === undefined);
+ } else {
+ assert(obj.jaz() == "foo");
+ }
+
+}
+
+let bObj = {
+ get foo() { return this.a; }
+};
+
+let obj2 = {
+ foo() {
+ return super.foo;
+ }
+};
+obj2.a = "foo";
+
+Object.setPrototypeOf(obj2, bObj);
+
+noInline(obj2.foo);
+
+for (let i = 0; i < 10000; i++) {
+ if (i == 9999) {
+ Object.defineProperty(bObj, "foo", {
+ get: () => {return "boo"; }
+ });
+ assert(obj2.foo() == "boo");
+ } else {
+ assert(obj2.foo() == "foo");
+ }
+}
+
--- /dev/null
+"use strict";
+
+function assert(a) {
+ if (!a)
+ throw new Error("Bad!");
+}
+
+var Base = class Base {
+ constructor() { this._name = "Name"; }
+ get name() { return this._name; } // If this instead returns a static: return "Foo" things somewhat work.
+ set name(x) { this._name = x; }
+};
+
+var Subclass = class Subclass extends Base {
+ get name() { return super.name; }
+};
+
+function getterName(instance) {
+ return instance.name;
+}
+
+noInline(getterName);
+
+function getterValue(instance) {
+ return instance.value;
+}
+
+noInline(getterValue);
+
+// Base case
+var instance = new Subclass;
+for (let i = 0; i < 10000;i++)
+ assert(getterName(instance) == "Name");
+
+// Polymorphic case
+
+class PolymorphicSubclass {
+ get value() { return super.value; }
+};
+
+let numPolymorphicClasses = 4;
+let subclasses = new Array(numPolymorphicClasses);
+for (let i = 0; i < numPolymorphicClasses; i++) {
+ let BaseCode = `
+ class Base${i} {
+ get value() { return this._value; }
+ };
+ `;
+
+ let Base = eval(BaseCode);
+ subclasses[i] = new PolymorphicSubclass();
+ subclasses[i]._value = i;
+
+ Object.setPrototypeOf(subclasses[i], Base.prototype);
+}
+
+for (let i = 0; i < 1000000; i++) {
+ let index = i % numPolymorphicClasses;
+ let value = getterValue(subclasses[index]);
+ assert(value == index);
+}
+
+// Megamorphic case
+
+let nClasses = 1000;
+class MegamorphicSubclass {
+ get value() { return super.value; }
+};
+
+subclasses = new Array(nClasses);
+for (let i = 0; i < nClasses; i++) {
+ let BaseCode = `
+ class Base${i + 4} {
+ get value() { return this._value; }
+ };
+ `;
+
+ let Base = eval(BaseCode);
+ subclasses[i] = new MegamorphicSubclass();
+ subclasses[i]._value = i;
+
+ Object.setPrototypeOf(subclasses[i], Base.prototype);
+}
+
+for (let i = 0; i < 1000000; i++) {
+ let index = i % nClasses;
+ let value = getterValue(subclasses[index]);
+ assert(value == index);
+}
+
+// CustomGetter case
+
+let customGetter = createCustomGetterObject();
+Object.setPrototypeOf(customGetter, Object.prototype);
+
+let subObj = {
+ __proto__: customGetter,
+ get value () {
+ return super.customGetterAccessor;
+ }
+}
+
+for (let i = 0; i < 1000000; i++) {
+ let value = getterValue(subObj);
+ assert(value == 100);
+}
+
+subObj.shouldThrow = true;
+for (let i = 0; i < 1000000; i++) {
+ try {
+ getterValue(subObj);
+ assert(false);
+ } catch(e) {
+ assert(e instanceof TypeError);
+ };
+}
+
+// CustomValue case
+
+customGetter = createCustomGetterObject();
+Object.setPrototypeOf(customGetter, Object.prototype);
+
+subObj = {
+ __proto__: customGetter,
+ get value () {
+ return super.customGetter;
+ }
+}
+
+for (let i = 0; i < 1000000; i++) {
+ let value = getterValue(subObj);
+ assert(value == 100);
+}
+
+subObj.shouldThrow = true;
+for (let i = 0; i < 1000000; i++) {
+ let value = getterValue(subObj);
+ assert(value == 100);
+}
+
+// Exception handling case
+
+class BaseException {
+ constructor() { this._name = "Name"; }
+ get name() {
+ if (this.shouldThrow)
+ throw new Error("Forced Exception");
+ return this._name;
+ }
+};
+
+class SubclassException extends BaseException {
+ get name() { return super.name; }
+};
+
+let eObj = new SubclassException;
+for (let i = 0; i < 10000;i++)
+ assert(getterName(eObj) == "Name");
+
+eObj.shouldThrow = true;
+for (let i = 0; i < 1000000; i++) {
+ try {
+ getterValue(eObj);
+ assert(false);
+ } catch(e) {
+ eObj.shouldThrow = false;
+ assert(getterName(eObj) == "Name");
+ };
+}
+
+// In getter exception handling
+
+class BaseExceptionComplex {
+ constructor() { this._name = "Name"; }
+ foo () {
+ if (this.shouldThrow)
+ throw new Error("Forced Exception");
+ }
+ get name() {
+ this.foo();
+ return this._name;
+ }
+};
+
+class SubclassExceptionComplex extends BaseExceptionComplex {
+ get name() {
+ try {
+ return super.name;
+ } catch(e) {
+ this.shouldThrow = false;
+ return super.name;
+ }
+ }
+};
+
+eObj = new SubclassExceptionComplex;
+for (let i = 0; i < 10000;i++)
+ assert(getterName(eObj) == "Name");
+
+eObj.shouldThrow = true;
+for (let i = 0; i < 1000000; i++)
+ assert(getterName(eObj) == "Name");
+
--- /dev/null
+let assert = (a) => {
+ if (!a)
+ throw "Bad!";
+}
+
+let n = 200;
+
+let A = {
+ c: 42
+}
+
+let C = {
+ __proto__: A
+}
+
+let B = {
+ __proto__: C,
+ f(i) {
+ return super.c;
+ }
+}
+
+var result = 0;
+for (var i = 0; i < n; ++i) {
+ if (i == n / 2 ) {
+ // This operation is going to force op_get_by_id_with_this to be regenerated
+ Object.defineProperty(A, "c", {get: () => 12});
+ }
+ result += B.f(i);
+}
+
+assert(result == 5400);
+
+2017-02-26 Caio Lima <ticaiolima@gmail.com>
+
+ op_get_by_id_with_this should use inline caching
+ https://bugs.webkit.org/show_bug.cgi?id=162124
+
+ Reviewed by Saam Barati.
+
+ This patch is enabling inline cache for op_get_by_id_with_this in all
+ tiers. It means that operations using ```super.member``` are going to
+ be able to be optimized by PIC. To enable it, we introduced a new
+ member of StructureStubInfo.patch named thisGPR, created a new class
+ to manage the IC named JITGetByIdWithThisGenerator and changed
+ PolymorphicAccess.regenerate that uses StructureStubInfo.patch.thisGPR
+ to decide the correct this value on inline caches.
+ With inline cache enabled, ```super.member``` accesses are ~4.5x faster,
+ according to microbenchmarks.
+
+ * bytecode/AccessCase.cpp:
+ (JSC::AccessCase::generateImpl):
+ * bytecode/PolymorphicAccess.cpp:
+ (JSC::PolymorphicAccess::regenerate):
+ * bytecode/PolymorphicAccess.h:
+ * bytecode/StructureStubInfo.cpp:
+ (JSC::StructureStubInfo::reset):
+ * bytecode/StructureStubInfo.h:
+ * dfg/DFGFixupPhase.cpp:
+ (JSC::DFG::FixupPhase::fixupNode):
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::link):
+ * dfg/DFGJITCompiler.h:
+ (JSC::DFG::JITCompiler::addGetByIdWithThis):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::compileIn):
+ * dfg/DFGSpeculativeJIT.h:
+ (JSC::DFG::SpeculativeJIT::callOperation):
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::cachedGetByIdWithThis):
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::cachedGetByIdWithThis):
+ (JSC::DFG::SpeculativeJIT::compile):
+ * ftl/FTLLowerDFGToB3.cpp:
+ (JSC::FTL::DFG::LowerDFGToB3::compileGetByIdWithThis):
+ (JSC::FTL::DFG::LowerDFGToB3::compileIn):
+ (JSC::FTL::DFG::LowerDFGToB3::getByIdWithThis):
+ * jit/CCallHelpers.h:
+ (JSC::CCallHelpers::setupArgumentsWithExecState):
+ * jit/ICStats.h:
+ * jit/JIT.cpp:
+ (JSC::JIT::JIT):
+ (JSC::JIT::privateCompileSlowCases):
+ (JSC::JIT::link):
+ * jit/JIT.h:
+ * jit/JITInlineCacheGenerator.cpp:
+ (JSC::JITByIdGenerator::JITByIdGenerator):
+ (JSC::JITGetByIdWithThisGenerator::JITGetByIdWithThisGenerator):
+ (JSC::JITGetByIdWithThisGenerator::generateFastPath):
+ * jit/JITInlineCacheGenerator.h:
+ (JSC::JITGetByIdWithThisGenerator::JITGetByIdWithThisGenerator):
+ * jit/JITInlines.h:
+ (JSC::JIT::callOperation):
+ * jit/JITOperations.cpp:
+ * jit/JITOperations.h:
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::emit_op_get_by_id_with_this):
+ (JSC::JIT::emitSlow_op_get_by_id_with_this):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::emit_op_get_by_id_with_this):
+ (JSC::JIT::emitSlow_op_get_by_id_with_this):
+ * jit/Repatch.cpp:
+ (JSC::appropriateOptimizingGetByIdFunction):
+ (JSC::appropriateGenericGetByIdFunction):
+ (JSC::tryCacheGetByID):
+ * jit/Repatch.h:
+ * jsc.cpp:
+ (WTF::CustomGetter::getOwnPropertySlot):
+ (WTF::CustomGetter::customGetterAcessor):
+
2017-02-24 JF Bastien <jfbastien@apple.com>
WebAssembly: miscellaneous spec fixes
const Identifier& ident = *state.ident;
JSValueRegs valueRegs = state.valueRegs;
GPRReg baseGPR = state.baseGPR;
+ GPRReg thisGPR = state.thisGPR != InvalidGPRReg ? state.thisGPR : baseGPR;
GPRReg scratchGPR = state.scratchGPR;
ASSERT(m_conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint());
loadedValueGPR, calleeFrame.withOffset(CallFrameSlot::callee * sizeof(Register)));
jit.storeCell(
- baseGPR,
+ thisGPR,
calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
if (m_type == Setter) {
// to make some space here.
jit.makeSpaceOnStackForCCall();
+ // Check if it is a super access
+ GPRReg baseForCustomGetGPR = baseGPR != thisGPR ? thisGPR : baseForGetGPR;
+
// getter: EncodedJSValue (*GetValueFunc)(ExecState*, EncodedJSValue thisValue, PropertyName);
// setter: void (*PutValueFunc)(ExecState*, EncodedJSValue thisObject, EncodedJSValue value);
// Custom values are passed the slotBase (the property holder), custom accessors are passed the thisVaule (reciever).
// FIXME: Remove this differences in custom values and custom accessors.
// https://bugs.webkit.org/show_bug.cgi?id=158014
- GPRReg baseForCustomValue = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForGetGPR;
+ GPRReg baseForCustom = m_type == CustomValueGetter || m_type == CustomValueSetter ? baseForAccessGPR : baseForCustomGetGPR;
#if USE(JSVALUE64)
if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
jit.setupArgumentsWithExecState(
- baseForCustomValue,
+ baseForCustom,
CCallHelpers::TrustedImmPtr(ident.impl()));
} else
- jit.setupArgumentsWithExecState(baseForCustomValue, valueRegs.gpr());
+ jit.setupArgumentsWithExecState(baseForCustom, valueRegs.gpr());
#else
if (m_type == CustomValueGetter || m_type == CustomAccessorGetter) {
jit.setupArgumentsWithExecState(
- EABI_32BIT_DUMMY_ARG baseForCustomValue,
+ EABI_32BIT_DUMMY_ARG baseForCustom,
CCallHelpers::TrustedImm32(JSValue::CellTag),
CCallHelpers::TrustedImmPtr(ident.impl()));
} else {
jit.setupArgumentsWithExecState(
- EABI_32BIT_DUMMY_ARG baseForCustomValue,
+ EABI_32BIT_DUMMY_ARG baseForCustom,
CCallHelpers::TrustedImm32(JSValue::CellTag),
valueRegs.payloadGPR(), valueRegs.tagGPR());
}
state.ident = &ident;
state.baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+ state.thisGPR = static_cast<GPRReg>(stubInfo.patch.thisGPR);
state.valueRegs = stubInfo.valueRegs();
ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
state.allocator = &allocator;
allocator.lock(state.baseGPR);
+ if (state.thisGPR != InvalidGPRReg)
+ allocator.lock(state.thisGPR);
allocator.lock(state.valueRegs);
#if USE(JSVALUE32_64)
allocator.lock(static_cast<GPRReg>(stubInfo.patch.baseTagGPR));
MacroAssembler::JumpList failAndRepatch;
MacroAssembler::JumpList failAndIgnore;
GPRReg baseGPR { InvalidGPRReg };
+ GPRReg thisGPR { InvalidGPRReg };
JSValueRegs valueRegs;
GPRReg scratchGPR { InvalidGPRReg };
const Identifier* ident;
case AccessType::Get:
resetGetByID(codeBlock, *this, GetByIDKind::Normal);
break;
+ case AccessType::GetWithThis:
+ resetGetByID(codeBlock, *this, GetByIDKind::WithThis);
+ break;
case AccessType::Put:
resetPutByID(codeBlock, *this);
break;
enum class AccessType : int8_t {
Get,
+ GetWithThis,
TryGet,
Put,
In
int8_t baseGPR;
int8_t valueGPR;
+ int8_t thisGPR;
#if USE(JSVALUE32_64)
int8_t valueTagGPR;
int8_t baseTagGPR;
+ int8_t thisTagGPR;
#endif
} patch;
fixEdge<CellUse>(node->child1());
break;
}
+
+ case GetByIdWithThis: {
+ if (node->child1()->shouldSpeculateCell() && node->child2()->shouldSpeculateCell()) {
+ fixEdge<CellUse>(node->child1());
+ fixEdge<CellUse>(node->child2());
+ }
+ break;
+ }
case PutById:
case PutByIdFlush:
case ExitOK:
case BottomValue:
case TypeOf:
- case GetByIdWithThis:
case PutByIdWithThis:
case PutByValWithThis:
case GetByValWithThis:
for (unsigned i = m_getByIds.size(); i--;)
m_getByIds[i].finalize(linkBuffer);
+ for (unsigned i = m_getByIdsWithThis.size(); i--;)
+ m_getByIdsWithThis[i].finalize(linkBuffer);
for (unsigned i = m_putByIds.size(); i--;)
m_putByIds[i].finalize(linkBuffer);
m_getByIds.append(InlineCacheWrapper<JITGetByIdGenerator>(gen, slowPath));
}
+ void addGetByIdWithThis(const JITGetByIdWithThisGenerator& gen, SlowPathGenerator* slowPath)
+ {
+ m_getByIdsWithThis.append(InlineCacheWrapper<JITGetByIdWithThisGenerator>(gen, slowPath));
+ }
+
void addPutById(const JITPutByIdGenerator& gen, SlowPathGenerator* slowPath)
{
m_putByIds.append(InlineCacheWrapper<JITPutByIdGenerator>(gen, slowPath));
};
Vector<InlineCacheWrapper<JITGetByIdGenerator>, 4> m_getByIds;
+ Vector<InlineCacheWrapper<JITGetByIdWithThisGenerator>, 4> m_getByIdsWithThis;
Vector<InlineCacheWrapper<JITPutByIdGenerator>, 4> m_putByIds;
Vector<InRecord, 4> m_ins;
Vector<JSCallRecord, 4> m_jsCalls;
stubInfo->codeOrigin = node->origin.semantic;
stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
+ stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
#if USE(JSVALUE32_64)
stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
stubInfo->patch.baseTagGPR = static_cast<int8_t>(InvalidGPRReg);
+ stubInfo->patch.thisTagGPR = static_cast<int8_t>(InvalidGPRReg);
#endif
stubInfo->patch.usedRegisters = usedRegisters();
#if USE(JSVALUE64)
void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill, AccessType = AccessType::Get);
void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedGetByIdWithThis(CodeOrigin, GPRReg baseGPR, GPRReg thisGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::JumpList slowPathTarget = JITCompiler::JumpList());
#elif USE(JSVALUE32_64)
void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill, AccessType = AccessType::Get);
void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedGetByIdWithThis(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg thisTagGPROrNone, GPRReg thisPayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::JumpList slowPathTarget = JITCompiler::JumpList());
#endif
void compileDeleteById(Node*);
m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
return appendCallSetResult(operation, result);
}
+ JITCompiler::Call callOperation(J_JITOperation_ESsiJJI operation, GPRReg result, StructureStubInfo* stubInfo, GPRReg arg1, GPRReg arg2, UniquedStringImpl* uid)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, arg2, TrustedImmPtr(uid));
+ return appendCallSetResult(operation, result);
+ }
JITCompiler::Call callOperation(J_JITOperation_EDA operation, GPRReg result, FPRReg arg1, GPRReg arg2)
{
m_jit.setupArgumentsWithExecState(arg1, arg2);
m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1.payloadGPR(), arg1.tagGPR(), TrustedImmPtr(uid));
return appendCallSetResult(operation, result.payloadGPR(), result.tagGPR());
}
+ JITCompiler::Call callOperation(J_JITOperation_ESsiJJI operation, JSValueRegs result, StructureStubInfo* stubInfo, JSValueRegs arg1, JSValueRegs arg2, UniquedStringImpl* uid)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1.payloadGPR(), arg1.tagGPR(), arg2.payloadGPR(), arg2.tagGPR(), TrustedImmPtr(uid));
+ return appendCallSetResult(operation, result.payloadGPR(), result.tagGPR());
+ }
+ JITCompiler::Call callOperation(J_JITOperation_ESsiJJI operation, JSValueRegs result, StructureStubInfo* stubInfo, int32_t arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, UniquedStringImpl* uid)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, TrustedImm32(arg1Tag), arg2Payload, arg2Tag, TrustedImmPtr(uid));
+ return appendCallSetResult(operation, result.payloadGPR(), result.tagGPR());
+ }
JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, JSValueRegs result, StructureStubInfo* stubInfo, int32_t arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, TrustedImm32(arg1Tag), TrustedImmPtr(uid));
return appendCallSetResult(operation, result.payloadGPR(), result.tagGPR());
}
+ JITCompiler::Call callOperation(J_JITOperation_ESsiJJI operation, JSValueRegs result, StructureStubInfo* stubInfo, int32_t arg1Tag, GPRReg arg1Payload, int32_t arg2Tag, GPRReg arg2Payload, UniquedStringImpl* uid)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, TrustedImm32(arg1Tag), arg2Payload, TrustedImm32(arg2Tag), TrustedImmPtr(uid));
+ return appendCallSetResult(operation, result.payloadGPR(), result.tagGPR());
+ }
JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, JSValueRegs result, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, TrustedImmPtr(uid));
addSlowPathGenerator(WTFMove(slowPath));
}
+void SpeculativeJIT::cachedGetByIdWithThis(
+ CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg thisTagGPR, GPRReg thisPayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR,
+ unsigned identifierNumber, JITCompiler::JumpList slowPathTarget)
+{
+ RegisterSet usedRegisters = this->usedRegisters();
+
+ CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
+ JITGetByIdWithThisGenerator gen(
+ m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber),
+ JSValueRegs(resultTagGPR, resultPayloadGPR), JSValueRegs(baseTagGPROrNone, basePayloadGPR), JSValueRegs(thisTagGPR, thisPayloadGPR), AccessType::GetWithThis);
+
+ gen.generateFastPath(m_jit);
+
+ JITCompiler::JumpList slowCases;
+ if (!slowPathTarget.empty())
+ slowCases.append(slowPathTarget);
+ slowCases.append(gen.slowPathJump());
+
+ std::unique_ptr<SlowPathGenerator> slowPath;
+ if (baseTagGPROrNone == InvalidGPRReg && thisTagGPR == InvalidGPRReg) {
+ slowPath = slowPathCall(
+ slowCases, this, operationGetByIdWithThisOptimize,
+ JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(),
+ static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
+ static_cast<int32_t>(JSValue::CellTag), thisPayloadGPR,
+ identifierUID(identifierNumber));
+ } else {
+ ASSERT(baseTagGPROrNone != InvalidGPRReg);
+ ASSERT(thisTagGPR != InvalidGPRReg);
+
+ slowPath = slowPathCall(
+ slowCases, this, operationGetByIdWithThisOptimize,
+ JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(), JSValueRegs(baseTagGPROrNone, basePayloadGPR), JSValueRegs(thisTagGPR, thisPayloadGPR), identifierUID(identifierNumber));
+ }
+
+ m_jit.addGetByIdWithThis(gen, slowPath.get());
+ addSlowPathGenerator(WTFMove(slowPath));
+}
+
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
RegisterSet usedRegisters = this->usedRegisters();
}
case GetByIdWithThis: {
- JSValueOperand base(this, node->child1());
- JSValueRegs baseRegs = base.jsValueRegs();
- JSValueOperand thisValue(this, node->child2());
- JSValueRegs thisRegs = thisValue.jsValueRegs();
-
- GPRFlushedCallResult resultPayload(this);
- GPRFlushedCallResult2 resultTag(this);
- GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg resultTagGPR = resultTag.gpr();
-
- flushRegisters();
- callOperation(operationGetByIdWithThis, JSValueRegs(resultTagGPR, resultPayloadGPR), baseRegs, thisRegs, identifierUID(node->identifierNumber()));
- m_jit.exceptionCheck();
-
- jsValueResult(resultTagGPR, resultPayloadGPR, node);
+ if (node->child1().useKind() == CellUse && node->child2().useKind() == CellUse) {
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateCellOperand thisValue(this, node->child2());
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg thisGPR = thisValue.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ cachedGetByIdWithThis(node->origin.semantic, InvalidGPRReg, baseGPR, InvalidGPRReg, thisGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber());
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
+ } else {
+ JSValueOperand base(this, node->child1());
+ JSValueOperand thisValue(this, node->child2());
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+
+ GPRReg baseTagGPR = base.tagGPR();
+ GPRReg basePayloadGPR = base.payloadGPR();
+ GPRReg thisTagGPR = thisValue.tagGPR();
+ GPRReg thisPayloadGPR = thisValue.payloadGPR();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ JITCompiler::JumpList notCellList;
+ notCellList.append(m_jit.branchIfNotCell(base.jsValueRegs()));
+ notCellList.append(m_jit.branchIfNotCell(thisValue.jsValueRegs()));
+
+ cachedGetByIdWithThis(node->origin.semantic, baseTagGPR, basePayloadGPR, thisTagGPR, thisPayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCellList);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
+ }
+
break;
}
addSlowPathGenerator(WTFMove(slowPath));
}
+void SpeculativeJIT::cachedGetByIdWithThis(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg thisGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::JumpList slowPathTarget)
+{
+ CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
+ RegisterSet usedRegisters = this->usedRegisters();
+ // We've already flushed registers to the stack, we don't need to spill these.
+ usedRegisters.set(baseGPR, false);
+ usedRegisters.set(thisGPR, false);
+ usedRegisters.set(resultGPR, false);
+
+ JITGetByIdWithThisGenerator gen(
+ m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, identifierUID(identifierNumber),
+ JSValueRegs(resultGPR), JSValueRegs(baseGPR), JSValueRegs(thisGPR), AccessType::GetWithThis);
+ gen.generateFastPath(m_jit);
+
+ JITCompiler::JumpList slowCases;
+ if (!slowPathTarget.empty())
+ slowCases.append(slowPathTarget);
+ slowCases.append(gen.slowPathJump());
+
+ auto slowPath = slowPathCall(
+ slowCases, this, operationGetByIdWithThisOptimize,
+ DontSpill, ExceptionCheckRequirement::CheckNeeded,
+ resultGPR, gen.stubInfo(), baseGPR, thisGPR, identifierUID(identifierNumber));
+
+ m_jit.addGetByIdWithThis(gen, slowPath.get());
+ addSlowPathGenerator(WTFMove(slowPath));
+}
+
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
}
case GetByIdWithThis: {
- JSValueOperand base(this, node->child1());
- GPRReg baseGPR = base.gpr();
- JSValueOperand thisValue(this, node->child2());
- GPRReg thisValueGPR = thisValue.gpr();
-
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- flushRegisters();
- callOperation(operationGetByIdWithThis, resultGPR, baseGPR, thisValueGPR, identifierUID(node->identifierNumber()));
- m_jit.exceptionCheck();
-
- jsValueResult(resultGPR, node);
+ if (node->child1().useKind() == CellUse && node->child2().useKind() == CellUse) {
+ SpeculateCellOperand base(this, node->child1());
+ GPRReg baseGPR = base.gpr();
+ SpeculateCellOperand thisValue(this, node->child2());
+ GPRReg thisValueGPR = thisValue.gpr();
+
+ GPRFlushedCallResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ flushRegisters();
+
+ cachedGetByIdWithThis(node->origin.semantic, baseGPR, thisValueGPR, resultGPR, node->identifierNumber(), JITCompiler::JumpList());
+
+ jsValueResult(resultGPR, node);
+
+ } else {
+ JSValueOperand base(this, node->child1());
+ GPRReg baseGPR = base.gpr();
+ JSValueOperand thisValue(this, node->child2());
+ GPRReg thisValueGPR = thisValue.gpr();
+
+ GPRFlushedCallResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ flushRegisters();
+
+ JITCompiler::JumpList notCellList;
+ notCellList.append(m_jit.branchIfNotCell(JSValueRegs(baseGPR)));
+ notCellList.append(m_jit.branchIfNotCell(JSValueRegs(thisValueGPR)));
+
+ cachedGetByIdWithThis(node->origin.semantic, baseGPR, thisValueGPR, resultGPR, node->identifierNumber(), notCellList);
+
+ jsValueResult(resultGPR, node);
+ }
+
break;
}
void compileGetByIdWithThis()
{
- LValue base = lowJSValue(m_node->child1());
- LValue thisValue = lowJSValue(m_node->child2());
- LValue result = vmCall(Int64, m_out.operation(operationGetByIdWithThis), m_callFrame, base, thisValue, m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()]));
- setJSValue(result);
+ if (m_node->child1().useKind() == CellUse && m_node->child2().useKind() == CellUse)
+ setJSValue(getByIdWithThis(lowCell(m_node->child1()), lowCell(m_node->child2())));
+ else {
+ LValue base = lowJSValue(m_node->child1());
+ LValue thisValue = lowJSValue(m_node->child2());
+
+ LBasicBlock baseCellCase = m_out.newBlock();
+ LBasicBlock notCellCase = m_out.newBlock();
+ LBasicBlock thisValueCellCase = m_out.newBlock();
+ LBasicBlock continuation = m_out.newBlock();
+
+ m_out.branch(
+ isCell(base, provenType(m_node->child1())), unsure(baseCellCase), unsure(notCellCase));
+
+ LBasicBlock lastNext = m_out.appendTo(baseCellCase, thisValueCellCase);
+
+ m_out.branch(
+ isCell(thisValue, provenType(m_node->child2())), unsure(thisValueCellCase), unsure(notCellCase));
+
+ m_out.appendTo(thisValueCellCase, notCellCase);
+ ValueFromBlock cellResult = m_out.anchor(getByIdWithThis(base, thisValue));
+ m_out.jump(continuation);
+
+ m_out.appendTo(notCellCase, continuation);
+ ValueFromBlock notCellResult = m_out.anchor(vmCall(
+ Int64, m_out.operation(operationGetByIdWithThis),
+ m_callFrame, base, thisValue,
+ m_out.constIntPtr(m_graph.identifiers()[m_node->identifierNumber()])));
+ m_out.jump(continuation);
+
+ m_out.appendTo(continuation, lastNext);
+ setJSValue(m_out.phi(Int64, cellResult, notCellResult));
+ }
+
}
void compileGetByValWithThis()
stubInfo->codeOrigin = node->origin.semantic;
stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
+ stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
stubInfo->patch.usedRegisters = params.unavailableRegisters();
CCallHelpers::PatchableJump jump = jit.patchableJump();
return patchpoint;
}
+
+ // Emits a B3 patchpoint implementing an inline cache for
+ // get_by_id_with_this (e.g. super.property access). The patchpoint's
+ // result (params[0]) is the loaded JSValue; params[1] is the base cell
+ // and params[2] is the this-value cell. Both inputs must already be
+ // known to be cells — the caller checks that.
+ LValue getByIdWithThis(LValue base, LValue thisValue)
+ {
+ Node* node = m_node;
+ UniquedStringImpl* uid = m_graph.identifiers()[node->identifierNumber()];
+
+ B3::PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+ patchpoint->appendSomeRegister(base);
+ patchpoint->appendSomeRegister(thisValue);
+ // The IC's generated code relies on the tag registers holding their
+ // canonical values, so pin them late in the patchpoint.
+ patchpoint->append(m_tagMask, ValueRep::lateReg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::lateReg(GPRInfo::tagTypeNumberRegister));
+
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+
+ RefPtr<PatchpointExceptionHandle> exceptionHandle =
+ preparePatchpointForExceptions(patchpoint);
+
+ State* state = &m_ftlState;
+ patchpoint->setGenerator(
+ [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ CallSiteIndex callSiteIndex =
+ state->jitCode->common.addUniqueCallSiteIndex(node->origin.semantic);
+
+ // This is the direct exit target for operation calls.
+ Box<CCallHelpers::JumpList> exceptions =
+ exceptionHandle->scheduleExitCreation(params)->jumps(jit);
+
+ // This is the exit for call IC's created by the getById for getters. We don't have
+ // to do anything weird other than call this, since it will associate the exit with
+ // the callsite index.
+ exceptionHandle->scheduleExitCreationForUnwind(params, callSiteIndex);
+
+ // Generator argument order is (value, base, this) — see the
+ // JITGetByIdWithThisGenerator constructor.
+ auto generator = Box<JITGetByIdWithThisGenerator>::create(
+ jit.codeBlock(), node->origin.semantic, callSiteIndex,
+ params.unavailableRegisters(), uid, JSValueRegs(params[0].gpr()),
+ JSValueRegs(params[1].gpr()), JSValueRegs(params[2].gpr()), AccessType::GetWithThis);
+
+ generator->generateFastPath(jit);
+ CCallHelpers::Label done = jit.label();
+
+ // The slow path (IC miss) is emitted out-of-line: it calls the
+ // optimizing operation, which may repatch the stub, then jumps
+ // back to `done`.
+ params.addLatePath(
+ [=] (CCallHelpers& jit) {
+ AllowMacroScratchRegisterUsage allowScratch(jit);
+
+ J_JITOperation_ESsiJJI optimizationFunction = operationGetByIdWithThisOptimize;
+
+ generator->slowPathJump().link(&jit);
+ CCallHelpers::Label slowPathBegin = jit.label();
+ CCallHelpers::Call slowPathCall = callOperation(
+ *state, params.unavailableRegisters(), jit, node->origin.semantic,
+ exceptions.get(), optimizationFunction, params[0].gpr(),
+ CCallHelpers::TrustedImmPtr(generator->stubInfo()), params[1].gpr(),
+ params[2].gpr(), CCallHelpers::TrustedImmPtr(uid)).call();
+ jit.jump().linkTo(done, &jit);
+
+ generator->reportSlowPathCall(slowPathBegin, slowPathCall);
+
+ // Finalization happens once the LinkBuffer exists.
+ jit.addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ generator->finalize(linkBuffer);
+ });
+ });
+ });
+
+ return patchpoint;
+ }
LValue isFastTypedArray(LValue object)
{
addCallArgument(arg3);
addCallArgument(arg4);
}
+
+
+ // Marshals ExecState plus six arguments for a
+ // (stubInfo, payload, tag, payload, tag, uid)-shaped slow-path call
+ // (J_JITOperation_ESsiJJI, 32-bit JSVALUE32_64 layout) on ports that
+ // pass all arguments through addCallArgument.
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5, TrustedImmPtr arg6)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ }
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3, GPRReg arg4)
{
addCallArgument(arg4);
}
+ // Same as above but with a GPR-held tag for the second JSValue
+ // (both tags live in registers rather than being constant).
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
{
resetCallArguments();
poke(arg4, POKE_ARGUMENT_OFFSET);
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+
+
+ // Four-register-ABI variant: the ExecState and first three arguments go
+ // in registers (via the 3-arg overload); arguments 4-6 are poked onto
+ // the stack, highest offset first so lower pokes don't clobber them.
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+
+ // Same stack-poke variant, but the fifth argument is an immediate tag
+ // (TrustedImm32) rather than a GPR.
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5, TrustedImmPtr arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
{
macro(OperationGetByIdGeneric) \
macro(OperationGetByIdBuildList) \
macro(OperationGetByIdOptimize) \
+ macro(OperationGetByIdWithThisOptimize) \
macro(OperationInOptimize) \
macro(OperationIn) \
macro(OperationGenericIn) \
, m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
, m_bytecodeOffset(std::numeric_limits<unsigned>::max())
, m_getByIdIndex(UINT_MAX)
+ , m_getByIdWithThisIndex(UINT_MAX)
, m_putByIdIndex(UINT_MAX)
, m_byValInstructionIndex(UINT_MAX)
, m_callLinkInfoIndex(UINT_MAX)
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
m_getByIdIndex = 0;
+ m_getByIdWithThisIndex = 0;
m_putByIdIndex = 0;
m_byValInstructionIndex = 0;
m_callLinkInfoIndex = 0;
case op_get_by_id_proto_load:
case op_get_by_id_unset:
DEFINE_SLOWCASE_OP(op_get_by_id)
+ DEFINE_SLOWCASE_OP(op_get_by_id_with_this)
DEFINE_SLOWCASE_OP(op_get_by_val)
DEFINE_SLOWCASE_OP(op_instanceof)
DEFINE_SLOWCASE_OP(op_instanceof_custom)
}
RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
+ RELEASE_ASSERT(m_getByIdWithThisIndex == m_getByIdsWithThis.size());
RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
for (unsigned i = m_getByIds.size(); i--;)
m_getByIds[i].finalize(patchBuffer);
+ for (unsigned i = m_getByIdsWithThis.size(); i--;)
+ m_getByIdsWithThis[i].finalize(patchBuffer);
for (unsigned i = m_putByIds.size(); i--;)
m_putByIds[i].finalize(patchBuffer);
void emitSlow_op_get_callee(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_try_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_id_with_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_arguments_length(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_argument_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
#if USE(JSVALUE64)
MacroAssembler::Call callOperation(J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, UniquedStringImpl*);
MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, UniquedStringImpl*);
+ MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJJI, int, StructureStubInfo*, GPRReg, GPRReg, UniquedStringImpl*);
#else
MacroAssembler::Call callOperation(J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, UniquedStringImpl*);
MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, UniquedStringImpl*);
+ MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJJI, int, StructureStubInfo*, GPRReg, GPRReg, GPRReg, GPRReg, UniquedStringImpl*);
#endif
MacroAssembler::Call callOperation(J_JITOperation_EJI, int, GPRReg, UniquedStringImpl*);
MacroAssembler::Call callOperation(J_JITOperation_EJJ, int, GPRReg, GPRReg);
Vector<CallRecord> m_calls;
Vector<Label> m_labels;
Vector<JITGetByIdGenerator> m_getByIds;
+ Vector<JITGetByIdWithThisGenerator> m_getByIdsWithThis;
Vector<JITPutByIdGenerator> m_putByIds;
Vector<ByValCompilationInfo> m_byValCompilationInfo;
Vector<CallCompilationInfo> m_callCompilationInfo;
Label m_exceptionHandler;
unsigned m_getByIdIndex;
+ unsigned m_getByIdWithThisIndex;
unsigned m_putByIdIndex;
unsigned m_byValInstructionIndex;
unsigned m_callLinkInfoIndex;
m_stubInfo->patch.baseGPR = static_cast<int8_t>(base.payloadGPR());
m_stubInfo->patch.valueGPR = static_cast<int8_t>(value.payloadGPR());
+ m_stubInfo->patch.thisGPR = static_cast<int8_t>(InvalidGPRReg);
#if USE(JSVALUE32_64)
m_stubInfo->patch.baseTagGPR = static_cast<int8_t>(base.tagGPR());
m_stubInfo->patch.valueTagGPR = static_cast<int8_t>(value.tagGPR());
+ m_stubInfo->patch.thisTagGPR = static_cast<int8_t>(InvalidGPRReg);
#endif
}
generateFastCommon(jit, m_isLengthAccess ? InlineAccess::sizeForLengthAccess() : InlineAccess::sizeForPropertyAccess());
}
+// NOTE: unlike JITGetByIdGenerator, the argument order here is
+// (value, base, thisRegs) — callers pass the result registers before the
+// base registers. The base-class ctor still receives (base, value).
+JITGetByIdWithThisGenerator::JITGetByIdWithThisGenerator(
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
+ UniquedStringImpl*, JSValueRegs value, JSValueRegs base, JSValueRegs thisRegs, AccessType accessType)
+ : JITByIdGenerator(codeBlock, codeOrigin, callSite, accessType, usedRegisters, base, value)
+{
+ // Guard against tag/payload aliasing for the this-value.
+ // NOTE(review): on JSVALUE64 tagGPR() is not a real second register, so
+ // this assert is presumably only meaningful on 32-bit — confirm.
+ RELEASE_ASSERT(thisRegs.payloadGPR() != thisRegs.tagGPR());
+
+ // Record the this-value registers so repatching code can find them.
+ m_stubInfo->patch.thisGPR = static_cast<int8_t>(thisRegs.payloadGPR());
+#if USE(JSVALUE32_64)
+ m_stubInfo->patch.thisTagGPR = static_cast<int8_t>(thisRegs.tagGPR());
+#endif
+}
+
+// Emits the patchable fast path: a property-access-sized inline cache slot
+// (no length-access special case, unlike plain get_by_id).
+void JITGetByIdWithThisGenerator::generateFastPath(MacroAssembler& jit)
+{
+ generateFastCommon(jit, InlineAccess::sizeForPropertyAccess());
+}
+
JITPutByIdGenerator::JITPutByIdGenerator(
CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
JSValueRegs base, JSValueRegs value, GPRReg scratch,
bool m_isLengthAccess;
};
+// Inline-cache generator for op_get_by_id_with_this (super property loads):
+// like JITGetByIdGenerator, but additionally tracks the this-value registers
+// in the stub info. Argument order is (value, base, thisRegs).
+class JITGetByIdWithThisGenerator : public JITByIdGenerator {
+public:
+ JITGetByIdWithThisGenerator() { }
+
+ JITGetByIdWithThisGenerator(
+ CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, UniquedStringImpl* propertyName,
+ JSValueRegs value, JSValueRegs base, JSValueRegs thisRegs, AccessType);
+
+ void generateFastPath(MacroAssembler&);
+};
+
class JITPutByIdGenerator : public JITByIdGenerator {
public:
JITPutByIdGenerator() { }
return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
}
+// JSVALUE64: slow-path call for get_by_id_with_this —
+// (stubInfo, base, this, uid); result is value-profiled into dst.
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1, GPRReg arg2, UniquedStringImpl* uid)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, arg2, TrustedImmPtr(uid));
+ return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_EJJ operation, int dst, GPRReg arg1, GPRReg arg2)
{
setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
+// JSVALUE32_64: same call with split tag/payload pairs; note arguments are
+// passed payload-before-tag, per the EncodedJSValue stack layout.
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, UniquedStringImpl* uid)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, arg2Payload, arg2Tag, TrustedImmPtr(uid));
+ return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJ operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
{
setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag);
}));
}
+// Generic (never-cache) slow path for get_by_id_with_this: performs the
+// property lookup on `base` with `this` as the receiver seen by getters.
+EncodedJSValue JIT_OPERATION operationGetByIdWithThisGeneric(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, EncodedJSValue thisEncoded, UniquedStringImpl* uid)
+{
+ SuperSamplerScope superSamplerScope(false);
+
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ Identifier ident = Identifier::fromUid(vm, uid);
+
+ // Mark the stub so future calls don't keep trying to build an IC.
+ stubInfo->tookSlowPath = true;
+
+ JSValue baseValue = JSValue::decode(base);
+ JSValue thisValue = JSValue::decode(thisEncoded);
+ // The slot carries thisValue as the receiver, so custom/JS getters
+ // observe the correct `this` (super semantics).
+ PropertySlot slot(thisValue, PropertySlot::InternalMethodType::Get);
+
+ return JSValue::encode(baseValue.get(exec, ident, slot));
+}
+
+// Optimizing slow path for get_by_id_with_this: performs the lookup and, if
+// the stub is still willing to cache this structure, repatches the inline
+// cache (GetByIDKind::WithThis) so future hits stay in JIT code.
+EncodedJSValue JIT_OPERATION operationGetByIdWithThisOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, EncodedJSValue thisEncoded, UniquedStringImpl* uid)
+{
+ SuperSamplerScope superSamplerScope(false);
+
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ Identifier ident = Identifier::fromUid(vm, uid);
+
+ JSValue baseValue = JSValue::decode(base);
+ JSValue thisValue = JSValue::decode(thisEncoded);
+ LOG_IC((ICEvent::OperationGetByIdWithThisOptimize, baseValue.classInfoOrNull(*vm), ident));
+
+ // As in the generic path, the receiver is thisValue, not baseValue.
+ PropertySlot slot(thisValue, PropertySlot::InternalMethodType::Get);
+ return JSValue::encode(baseValue.getPropertySlot(exec, ident, slot, [&] (bool found, PropertySlot& slot) -> JSValue {
+ if (stubInfo->considerCaching(exec->codeBlock(), baseValue.structureOrNull()))
+ repatchGetByID(exec, baseValue, ident, slot, *stubInfo, GetByIDKind::WithThis);
+ return found ? slot.getValue(exec, ident) : jsUndefined();
+ }));
+}
+
EncodedJSValue JIT_OPERATION operationInOptimize(ExecState* exec, StructureStubInfo* stubInfo, JSCell* base, UniquedStringImpl* key)
{
SuperSamplerScope superSamplerScope(false);
typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_ESS)(ExecState*, size_t, size_t);
typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_ESsiCI)(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*);
typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_ESsiJI)(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_ESsiJJI)(ExecState*, StructureStubInfo*, EncodedJSValue, EncodedJSValue, UniquedStringImpl*);
typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EZ)(ExecState*, int32_t);
typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EZIcfZ)(ExecState*, int32_t, InlineCallFrame*, int32_t);
typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EZZ)(ExecState*, int32_t, int32_t);
EncodedJSValue JIT_OPERATION operationGetById(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationGetByIdGeneric(ExecState*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByIdWithThisGeneric(ExecState*, StructureStubInfo*, EncodedJSValue, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByIdWithThisOptimize(ExecState*, StructureStubInfo*, EncodedJSValue, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationInOptimize(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationIn(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationGenericIn(ExecState*, JSCell*, EncodedJSValue) WTF_INTERNAL;
void JIT::emit_op_get_by_id_with_this(Instruction* currentInstruction)
{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_by_id_with_this);
- slowPathCall.call();
+ // JSVALUE64 fast path: replace the old always-slow-path call with an
+ // inline cache. regT0 = base, regT1 = this; the IC writes the result
+ // back into regT0.
+ int resultVReg = currentInstruction[1].u.operand;
+ int baseVReg = currentInstruction[2].u.operand;
+ int thisVReg = currentInstruction[3].u.operand;
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[4].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+ emitGetVirtualRegister(thisVReg, regT1);
+ // Non-cell base or this bails to the slow case (two slow-case entries,
+ // linked in the same order by emitSlow_op_get_by_id_with_this).
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+ emitJumpSlowCaseIfNotJSCell(regT1, thisVReg);
+
+ // Generator argument order is (value, base, this); value and base share
+ // regT0 here.
+ JITGetByIdWithThisGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
+ ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), JSValueRegs(regT1), AccessType::GetWithThis);
+ gen.generateFastPath(*this);
+ addSlowCase(gen.slowPathJump());
+ m_getByIdsWithThis.append(gen);
+
+ emitValueProfilingSite();
+ emitPutVirtualRegister(resultVReg);
}
void JIT::emit_op_get_by_val_with_this(Instruction* currentInstruction)
gen.reportSlowPathCall(coldPathBegin, call);
}
+// JSVALUE64 slow case: links the two not-cell checks plus the IC-miss jump,
+// then calls the optimizing operation (which may repatch the stub).
+void JIT::emitSlow_op_get_by_id_with_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int resultVReg = currentInstruction[1].u.operand;
+ int baseVReg = currentInstruction[2].u.operand;
+ int thisVReg = currentInstruction[3].u.operand;
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[4].u.operand));
+
+ // Link order mirrors the addSlowCase order in the fast path.
+ linkSlowCaseIfNotJSCell(iter, baseVReg);
+ linkSlowCaseIfNotJSCell(iter, thisVReg);
+ linkSlowCase(iter);
+
+ JITGetByIdWithThisGenerator& gen = m_getByIdsWithThis[m_getByIdWithThisIndex++];
+
+ Label coldPathBegin = label();
+
+ // regT0/regT1 still hold base/this from the fast path.
+ Call call = callOperation(WithProfile, operationGetByIdWithThisOptimize, resultVReg, gen.stubInfo(), regT0, regT1, ident->impl());
+
+ gen.reportSlowPathCall(coldPathBegin, call);
+}
+
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
int baseVReg = currentInstruction[1].u.operand;
gen.reportSlowPathCall(coldPathBegin, call);
}
+// JSVALUE32_64 fast path: base in (regT1 tag, regT0 payload), this in
+// (regT4 tag, regT3 payload); the IC's result lands in (regT1, regT0).
+void JIT::emit_op_get_by_id_with_this(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int thisVReg = currentInstruction[3].u.operand;
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[4].u.operand));
+
+ emitLoad(base, regT1, regT0);
+ emitLoad(thisVReg, regT4, regT3);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ emitJumpSlowCaseIfNotJSCell(thisVReg, regT4);
+
+ // NOTE(review): unlike the 64-bit path's CallSiteIndex(m_bytecodeOffset),
+ // this uses CallSiteIndex(currentInstruction) — presumably the 32-bit
+ // Instruction*-based CallSiteIndex constructor; confirm the two encode
+ // the same call site. The base is passed payload-only since cell-ness
+ // was already checked above and the inline access needs only the
+ // payload.
+ JITGetByIdWithThisGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
+ ident->impl(), JSValueRegs(regT1, regT0), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT4, regT3), AccessType::GetWithThis);
+ gen.generateFastPath(*this);
+ addSlowCase(gen.slowPathJump());
+ m_getByIdsWithThis.append(gen);
+
+ emitValueProfilingSite();
+ emitStore(dst, regT1, regT0);
+}
+
+
+// JSVALUE32_64 slow case: links the two not-cell checks and the IC miss,
+// then calls the optimizing operation with split tag/payload registers.
+void JIT::emitSlow_op_get_by_id_with_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int resultVReg = currentInstruction[1].u.operand;
+ int baseVReg = currentInstruction[2].u.operand;
+ int thisVReg = currentInstruction[3].u.operand;
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[4].u.operand));
+
+ // Link order mirrors the addSlowCase order in the fast path.
+ linkSlowCaseIfNotJSCell(iter, baseVReg);
+ linkSlowCaseIfNotJSCell(iter, thisVReg);
+ linkSlowCase(iter);
+
+ JITGetByIdWithThisGenerator& gen = m_getByIdsWithThis[m_getByIdWithThisIndex++];
+
+ Label coldPathBegin = label();
+
+ // base = (regT1 tag, regT0 payload), this = (regT4 tag, regT3 payload),
+ // still live from the fast path.
+ Call call = callOperation(WithProfile, operationGetByIdWithThisOptimize, resultVReg, gen.stubInfo(), regT1, regT0, regT4, regT3, ident->impl());
+
+ gen.reportSlowPathCall(coldPathBegin, call);
+}
+
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
// In order to be able to patch both the Structure, and the object offset, we store one pointer,
store32(regT2, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + PayloadOffset));
}
-void JIT::emit_op_get_by_id_with_this(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_by_id_with_this);
- slowPathCall.call();
-}
-
void JIT::emit_op_get_by_val_with_this(Instruction* currentInstruction)
{
JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_by_val_with_this);
#endif
}
-inline J_JITOperation_ESsiJI appropriateOptimizingGetByIdFunction(GetByIDKind kind)
+inline FunctionPtr appropriateOptimizingGetByIdFunction(GetByIDKind kind)
{
if (kind == GetByIDKind::Normal)
return operationGetByIdOptimize;
+ else if (kind == GetByIDKind::WithThis)
+ return operationGetByIdWithThisOptimize;
return operationTryGetByIdOptimize;
}
-inline J_JITOperation_ESsiJI appropriateGenericGetByIdFunction(GetByIDKind kind)
+inline FunctionPtr appropriateGenericGetByIdFunction(GetByIDKind kind)
{
if (kind == GetByIDKind::Normal)
return operationGetById;
+ else if (kind == GetByIDKind::WithThis)
+ return operationGetByIdWithThisGeneric;
return operationTryGetById;
}
else
type = AccessCase::CustomValueGetter;
+ // We don't emit an IC for DOMJIT custom accessors when the op is get_by_id_with_this.
+ if (Options::useDOMJIT() && kind == GetByIDKind::WithThis && type == AccessCase::CustomAccessorGetter && domJIT)
+ return GiveUpOnCache;
+
newCase = GetterSetterAccessCase::create(
vm, codeBlock, type, offset, structure, conditionSet, loadTargetFromProxy,
slot.watchpointSet(), slot.isCacheableCustom() ? slot.customGetter() : nullptr,
enum class GetByIDKind {
Normal,
- Try
+ Try,
+ WithThis
};
void repatchGetByID(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&, GetByIDKind);
slot.setCacheableCustom(thisObject, DontDelete | ReadOnly | DontEnum, thisObject->customGetter);
return true;
}
+
+ if (propertyName == PropertyName(Identifier::fromString(exec, "customGetterAccessor"))) {
+ slot.setCacheableCustom(thisObject, DontDelete | ReadOnly | DontEnum | CustomAccessor, thisObject->customGetterAcessor);
+ return true;
+ }
+
return JSObject::getOwnPropertySlot(thisObject, exec, propertyName, slot);
}
return throwVMTypeError(exec, scope);
return JSValue::encode(jsNumber(100));
}
+
+
+ // Test-support custom accessor getter: throws a TypeError when `this` is
+ // not an object or when this.shouldThrow is truthy; otherwise returns 100.
+ // NOTE: "Acessor" is a typo for "Accessor", kept consistent with the
+ // getOwnPropertySlot registration that references this name.
+ static EncodedJSValue customGetterAcessor(ExecState* exec, EncodedJSValue thisValue, PropertyName)
+ {
+ VM& vm = exec->vm();
+ auto scope = DECLARE_THROW_SCOPE(vm);
+
+ JSObject* thisObject = jsDynamicCast<JSObject*>(vm, JSValue::decode(thisValue));
+ if (!thisObject)
+ return throwVMTypeError(exec, scope);
+ bool shouldThrow = thisObject->get(exec, PropertyName(Identifier::fromString(exec, "shouldThrow"))).toBoolean(exec);
+ if (shouldThrow)
+ return throwVMTypeError(exec, scope);
+ return JSValue::encode(jsNumber(100));
+ }
};
class RuntimeArray : public JSArray {