We should be able to inline getter/setter calls inside an inline cache even when the SpillRegistersMode is NeedsToSpill
https://bugs.webkit.org/show_bug.cgi?id=149601
Reviewed by Filip Pizlo.
Source/JavaScriptCore:
Before, if we had a PolymorphicAccess and a StructureStubInfo
with a NeedToSpill spillMode, we wouldn't generate getter/setter
calls. This patch changes it such that we will generate the
getter/setter call and do the necessary register spilling/filling
around the getter/setter call to preserve any "usedRegisters".
This has an interesting story with how it relates to exception handling
inside the DFG. Because the GetById variants are considered a throwing call
site, we must make sure that we properly restore the registers spilled to the stack
in case of an exception being thrown inside the getter/setter call. We do
this by having the inline cache register itself as a new exception handling
call site. When the inline cache "catches" the exception (i.e., genericUnwind
will jump to this code), it will restore the registers it spilled that are
live inside the original catch handler, and then jump to the original catch
handler. We make sure to only generate this makeshift catch handler when we
actually need to do any cleanup. If we determine that we don't need to restore
any registers, we don't bother generating this makeshift catch handler.
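
To illustrate the pattern this now handles, here is a minimal sketch in
the spirit of the added try-catch stress tests (the names, loop counts,
and values below are illustrative, not the literal test contents):

    function assert(b) {
        if (!b)
            throw new Error("bad assertion");
    }
    noInline(assert);

    let shouldThrow = false;
    let o1 = {
        get f() {
            if (shouldThrow)
                throw new Error("getter threw");
            return 20;
        }
    };
    let o2 = { f: 40 };

    function foo(o) {
        // The get_by_id here can be compiled to an IC with a JS getter
        // case; if the getter throws, the registers spilled around the
        // call must be restored before control reaches the catch block.
        try {
            return o.f;
        } catch (e) {
            return -1;
        }
    }
    noInline(foo);

    for (let i = 0; i < 10000; i++)
        assert(foo(i % 2 ? o1 : o2) === (i % 2 ? 20 : 40));
    shouldThrow = true;
    assert(foo(o1) === -1);

Run under the jsc shell (noInline is a shell intrinsic), the warm-up loop
gets foo into the DFG with a getter IC, and flipping shouldThrow exercises
the restore-registers-then-jump-to-handler path described above.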
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::~CodeBlock):
(JSC::CodeBlock::handlerForIndex):
(JSC::CodeBlock::newExceptionHandlingCallSiteIndex):
(JSC::CodeBlock::removeExceptionHandlerForCallSite):
(JSC::CodeBlock::lineNumberForBytecodeOffset):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::appendExceptionHandler):
* bytecode/PolymorphicAccess.cpp:
(JSC::AccessGenerationState::AccessGenerationState):
(JSC::AccessGenerationState::restoreScratch):
(JSC::AccessGenerationState::succeed):
(JSC::AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling):
(JSC::AccessGenerationState::preserveLiveRegistersToStackForCall):
(JSC::AccessGenerationState::restoreLiveRegistersFromStackForCall):
(JSC::AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException):
(JSC::AccessGenerationState::liveRegistersForCall):
(JSC::AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal):
(JSC::AccessGenerationState::callSiteIndexForExceptionHandling):
(JSC::AccessGenerationState::originalExceptionHandler):
(JSC::AccessGenerationState::numberOfStackBytesUsedForRegisterPreservation):
(JSC::AccessGenerationState::needsToRestoreRegistersIfException):
(JSC::AccessGenerationState::originalCallSiteIndex):
(JSC::AccessGenerationState::liveRegistersToPreserveAtExceptionHandlingCallSite):
(JSC::AccessCase::AccessCase):
(JSC::AccessCase::generate):
(JSC::PolymorphicAccess::regenerateWithCases):
(JSC::PolymorphicAccess::regenerate):
(JSC::PolymorphicAccess::aboutToDie):
* bytecode/PolymorphicAccess.h:
(JSC::AccessCase::doesCalls):
(JSC::AccessCase::isGetter):
(JSC::AccessCase::callLinkInfo):
* bytecode/StructureStubInfo.cpp:
(JSC::StructureStubInfo::deref):
(JSC::StructureStubInfo::aboutToDie):
(JSC::StructureStubInfo::addAccessCase):
* bytecode/StructureStubInfo.h:
* bytecode/ValueRecovery.h:
(JSC::ValueRecovery::isInJSValueRegs):
(JSC::ValueRecovery::fpr):
* dfg/DFGCommonData.cpp:
(JSC::DFG::CommonData::addCodeOrigin):
(JSC::DFG::CommonData::addCodeOriginUnconditionally):
(JSC::DFG::CommonData::lastCallSite):
(JSC::DFG::CommonData::removeCallSiteIndex):
(JSC::DFG::CommonData::shrinkToFit):
* dfg/DFGCommonData.h:
* dfg/DFGJITCode.cpp:
(JSC::DFG::JITCode::reconstruct):
(JSC::DFG::JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite):
(JSC::DFG::JITCode::checkIfOptimizationThresholdReached):
* dfg/DFGJITCode.h:
(JSC::DFG::JITCode::osrEntryBlock):
(JSC::DFG::JITCode::setOSREntryBlock):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::appendExceptionHandlingOSRExit):
* dfg/DFGOSRExit.cpp:
(JSC::DFG::OSRExit::OSRExit):
* dfg/DFGOSRExit.h:
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::compileIn):
* dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::cachedGetById):
(JSC::DFG::SpeculativeJIT::cachedPutById):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::cachedGetById):
(JSC::DFG::SpeculativeJIT::cachedPutById):
* ftl/FTLCompile.cpp:
(JSC::FTL::mmAllocateDataSection):
* ftl/FTLJITCode.cpp:
(JSC::FTL::JITCode::validateReferences):
(JSC::FTL::JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite):
* ftl/FTLJITCode.h:
(JSC::FTL::JITCode::handles):
(JSC::FTL::JITCode::dataSections):
* jit/GCAwareJITStubRoutine.cpp:
(JSC::GCAwareJITStubRoutine::GCAwareJITStubRoutine):
(JSC::GCAwareJITStubRoutine::~GCAwareJITStubRoutine):
(JSC::GCAwareJITStubRoutine::observeZeroRefCount):
(JSC::MarkingGCAwareJITStubRoutineWithOneObject::markRequiredObjectsInternal):
(JSC::GCAwareJITStubRoutineWithExceptionHandler::GCAwareJITStubRoutineWithExceptionHandler):
(JSC::GCAwareJITStubRoutineWithExceptionHandler::aboutToDie):
(JSC::GCAwareJITStubRoutineWithExceptionHandler::~GCAwareJITStubRoutineWithExceptionHandler):
(JSC::createJITStubRoutine):
* jit/GCAwareJITStubRoutine.h:
* jit/JITCode.cpp:
(JSC::NativeJITCode::addressForCall):
(JSC::JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite):
* jit/JITCode.h:
* jit/JITInlineCacheGenerator.cpp:
(JSC::JITByIdGenerator::JITByIdGenerator):
(JSC::JITGetByIdGenerator::JITGetByIdGenerator):
(JSC::JITPutByIdGenerator::JITPutByIdGenerator):
* jit/JITInlineCacheGenerator.h:
(JSC::JITByIdGenerator::reportSlowPathCall):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::emitGetByValWithCachedId):
(JSC::JIT::emitPutByValWithCachedId):
(JSC::JIT::emit_op_get_by_id):
(JSC::JIT::emit_op_put_by_id):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::emitGetByValWithCachedId):
(JSC::JIT::emitPutByValWithCachedId):
(JSC::JIT::emit_op_get_by_id):
(JSC::JIT::emit_op_put_by_id):
* jit/JITStubRoutine.h:
(JSC::JITStubRoutine::createSelfManagedRoutine):
(JSC::JITStubRoutine::aboutToDie):
* jit/RegisterSet.cpp:
(JSC::RegisterSet::webAssemblyCalleeSaveRegisters):
(JSC::RegisterSet::registersToNotSaveForCall):
(JSC::RegisterSet::allGPRs):
* jit/RegisterSet.h:
(JSC::RegisterSet::set):
(JSC::RegisterSet::clear):
* jit/ScratchRegisterAllocator.cpp:
(JSC::ScratchRegisterAllocator::allocateScratchGPR):
(JSC::ScratchRegisterAllocator::allocateScratchFPR):
(JSC::ScratchRegisterAllocator::preserveReusedRegistersByPushing):
(JSC::ScratchRegisterAllocator::restoreReusedRegistersByPopping):
(JSC::ScratchRegisterAllocator::usedRegistersForCall):
(JSC::ScratchRegisterAllocator::preserveUsedRegistersToScratchBufferForCall):
(JSC::ScratchRegisterAllocator::restoreUsedRegistersFromScratchBufferForCall):
(JSC::ScratchRegisterAllocator::preserveRegistersToStackForCall):
(JSC::ScratchRegisterAllocator::restoreRegistersFromStackForCall):
* jit/ScratchRegisterAllocator.h:
(JSC::ScratchRegisterAllocator::numberOfReusedRegisters):
(JSC::ScratchRegisterAllocator::usedRegisters):
* jsc.cpp:
(WTF::CustomGetter::CustomGetter):
(WTF::CustomGetter::createStructure):
(WTF::CustomGetter::create):
(WTF::CustomGetter::getOwnPropertySlot):
(WTF::CustomGetter::customGetter):
(WTF::Element::handleOwner):
(GlobalObject::finishCreation):
(functionCreateImpureGetter):
(functionCreateCustomGetterObject):
(functionSetImpureGetterDelegate):
* tests/stress/try-catch-custom-getter-as-get-by-id.js: Added.
(assert):
(bar):
(foo):
* tests/stress/try-catch-getter-as-get-by-id-register-restoration.js: Added.
(assert):
(o1.get f):
(bar):
(foo):
* tests/stress/try-catch-getter-as-get-by-id.js: Added.
(assert):
(o1.get f):
(bar):
(foo):
* tests/stress/try-catch-setter-as-put-by-id.js: Added.
(assert):
(o1.set f):
(bar):
(foo):
* tests/stress/try-catch-stub-routine-replaced.js: Added.
(assert):
(arr):
(hello):
(foo):
(objChain.get f):
(fakeOut.get f):
(o.get f):
LayoutTests:
* js/regress/custom-setter-getter-as-put-get-by-id-expected.txt: Added.
* js/regress/custom-setter-getter-as-put-get-by-id.html: Added.
* js/regress/script-tests/custom-setter-getter-as-put-get-by-id.js: Added.
(assert):
(test):
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@190735 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+2015-10-08 Saam barati <sbarati@apple.com>
+
+ We should be able to inline getter/setter calls inside an inline cache even when the SpillRegistersMode is NeedsToSpill
+ https://bugs.webkit.org/show_bug.cgi?id=149601
+
+ Reviewed by Filip Pizlo.
+
+ * js/regress/custom-setter-getter-as-put-get-by-id-expected.txt: Added.
+ * js/regress/custom-setter-getter-as-put-get-by-id.html: Added.
+ * js/regress/script-tests/custom-setter-getter-as-put-get-by-id.js: Added.
+ (assert):
+ (test):
+
2015-10-08 Alexey Proskuryakov <ap@apple.com>
fast/events/scroll-after-click-on-tab-index.html is flaky
--- /dev/null
+JSRegress/custom-setter-getter-as-put-get-by-id
+
+On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
+
+
+PASS no exception thrown
+PASS successfullyParsed is true
+
+TEST COMPLETE
+
--- /dev/null
+<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
+<html>
+<head>
+<script src="../../resources/js-test-pre.js"></script>
+</head>
+<body>
+<script src="../../resources/regress-pre.js"></script>
+<script src="script-tests/custom-setter-getter-as-put-get-by-id.js"></script>
+<script src="../../resources/regress-post.js"></script>
+<script src="../../resources/js-test-post.js"></script>
+</body>
+</html>
--- /dev/null
+function assert(b) {
+ if (!b)
+ throw new Error("bad assertion");
+}
+noInline(assert);
+
+// RegExp.input is a handy custom getter/setter.
+var o1 = RegExp;
+function test(o) {
+ o.input = "bar";
+ return o.input;
+}
+noInline(test);
+
+var o2 = {
+ input: "hello"
+}
+
+var o3 = {
+ x: 20,
+ input: "hello"
+}
+
+// First compile as GetById node.
+for (let i = 0; i < 1000; i++) {
+ assert(test(i % 2 ? o2 : o3) === "bar");
+}
+
+// Cause the inline cache to generate customSetter/customGetter code on a GetById.
+for (let i = 0; i < 100; i++) {
+ assert(test(o1) === "bar");
+}
+
+2015-10-08 Saam barati <sbarati@apple.com>
+
+ We should be able to inline getter/setter calls inside an inline cache even when the SpillRegistersMode is NeedsToSpill
+ https://bugs.webkit.org/show_bug.cgi?id=149601
+
+ Reviewed by Filip Pizlo.
+
+ Before, if we had a PolymorphicAccess and a StructureStubInfo
+ with a NeedToSpill spillMode, we wouldn't generate getter/setter
+ calls. This patch changes it such that we will generate the
+ getter/setter call and do the necessary register spilling/filling
+ around the getter/setter call to preserve any "usedRegisters".
+
+ This has an interesting story with how it relates to exception handling
+ inside the DFG. Because the GetById variants are considered a throwing call
+ site, we must make sure that we properly restore the registers spilled to the stack
+ in case of an exception being thrown inside the getter/setter call. We do
+ this by having the inline cache register itself as a new exception handling
+ call site. When the inline cache "catches" the exception (i.e., genericUnwind
+ will jump to this code), it will restore the registers it spilled that are
+ live inside the original catch handler, and then jump to the original catch
+ handler. We make sure to only generate this makeshift catch handler when we
+ actually need to do any cleanup. If we determine that we don't need to restore
+ any registers, we don't bother generating this makeshift catch handler.
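+
+ A sketch of the setter flavor of this (illustrative only; not the
+ literal contents of try-catch-setter-as-put-by-id.js):
+
+     let shouldThrow = false;
+     let o1 = {
+         set f(v) {
+             if (shouldThrow)
+                 throw new Error("setter threw");
+             this._f = v;
+         }
+     };
+     let o2 = { f: 0 };
+     function foo(o) {
+         // The put_by_id IC calls the JS setter; if it throws, the
+         // registers spilled for the call are restored before the
+         // catch block runs.
+         try {
+             o.f = 42;
+             return true;
+         } catch (e) {
+             return false;
+         }
+     }
+     noInline(foo);
+     for (let i = 0; i < 10000; i++)
+         foo(i % 2 ? o1 : o2);
+     shouldThrow = true;
+     if (foo(o1) !== false)
+         throw new Error("bad");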
+
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::~CodeBlock):
+ (JSC::CodeBlock::handlerForIndex):
+ (JSC::CodeBlock::newExceptionHandlingCallSiteIndex):
+ (JSC::CodeBlock::removeExceptionHandlerForCallSite):
+ (JSC::CodeBlock::lineNumberForBytecodeOffset):
+ * bytecode/CodeBlock.h:
+ (JSC::CodeBlock::appendExceptionHandler):
+ * bytecode/PolymorphicAccess.cpp:
+ (JSC::AccessGenerationState::AccessGenerationState):
+ (JSC::AccessGenerationState::restoreScratch):
+ (JSC::AccessGenerationState::succeed):
+ (JSC::AccessGenerationState::calculateLiveRegistersForCallAndExceptionHandling):
+ (JSC::AccessGenerationState::preserveLiveRegistersToStackForCall):
+ (JSC::AccessGenerationState::restoreLiveRegistersFromStackForCall):
+ (JSC::AccessGenerationState::restoreLiveRegistersFromStackForCallWithThrownException):
+ (JSC::AccessGenerationState::liveRegistersForCall):
+ (JSC::AccessGenerationState::callSiteIndexForExceptionHandlingOrOriginal):
+ (JSC::AccessGenerationState::callSiteIndexForExceptionHandling):
+ (JSC::AccessGenerationState::originalExceptionHandler):
+ (JSC::AccessGenerationState::numberOfStackBytesUsedForRegisterPreservation):
+ (JSC::AccessGenerationState::needsToRestoreRegistersIfException):
+ (JSC::AccessGenerationState::originalCallSiteIndex):
+ (JSC::AccessGenerationState::liveRegistersToPreserveAtExceptionHandlingCallSite):
+ (JSC::AccessCase::AccessCase):
+ (JSC::AccessCase::generate):
+ (JSC::PolymorphicAccess::regenerateWithCases):
+ (JSC::PolymorphicAccess::regenerate):
+ (JSC::PolymorphicAccess::aboutToDie):
+ * bytecode/PolymorphicAccess.h:
+ (JSC::AccessCase::doesCalls):
+ (JSC::AccessCase::isGetter):
+ (JSC::AccessCase::callLinkInfo):
+ * bytecode/StructureStubInfo.cpp:
+ (JSC::StructureStubInfo::deref):
+ (JSC::StructureStubInfo::aboutToDie):
+ (JSC::StructureStubInfo::addAccessCase):
+ * bytecode/StructureStubInfo.h:
+ * bytecode/ValueRecovery.h:
+ (JSC::ValueRecovery::isInJSValueRegs):
+ (JSC::ValueRecovery::fpr):
+ * dfg/DFGCommonData.cpp:
+ (JSC::DFG::CommonData::addCodeOrigin):
+ (JSC::DFG::CommonData::addCodeOriginUnconditionally):
+ (JSC::DFG::CommonData::lastCallSite):
+ (JSC::DFG::CommonData::removeCallSiteIndex):
+ (JSC::DFG::CommonData::shrinkToFit):
+ * dfg/DFGCommonData.h:
+ * dfg/DFGJITCode.cpp:
+ (JSC::DFG::JITCode::reconstruct):
+ (JSC::DFG::JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite):
+ (JSC::DFG::JITCode::checkIfOptimizationThresholdReached):
+ * dfg/DFGJITCode.h:
+ (JSC::DFG::JITCode::osrEntryBlock):
+ (JSC::DFG::JITCode::setOSREntryBlock):
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::appendExceptionHandlingOSRExit):
+ * dfg/DFGOSRExit.cpp:
+ (JSC::DFG::OSRExit::OSRExit):
+ * dfg/DFGOSRExit.h:
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::compileIn):
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::cachedGetById):
+ (JSC::DFG::SpeculativeJIT::cachedPutById):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::cachedGetById):
+ (JSC::DFG::SpeculativeJIT::cachedPutById):
+ * ftl/FTLCompile.cpp:
+ (JSC::FTL::mmAllocateDataSection):
+ * ftl/FTLJITCode.cpp:
+ (JSC::FTL::JITCode::validateReferences):
+ (JSC::FTL::JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite):
+ * ftl/FTLJITCode.h:
+ (JSC::FTL::JITCode::handles):
+ (JSC::FTL::JITCode::dataSections):
+ * jit/GCAwareJITStubRoutine.cpp:
+ (JSC::GCAwareJITStubRoutine::GCAwareJITStubRoutine):
+ (JSC::GCAwareJITStubRoutine::~GCAwareJITStubRoutine):
+ (JSC::GCAwareJITStubRoutine::observeZeroRefCount):
+ (JSC::MarkingGCAwareJITStubRoutineWithOneObject::markRequiredObjectsInternal):
+ (JSC::GCAwareJITStubRoutineWithExceptionHandler::GCAwareJITStubRoutineWithExceptionHandler):
+ (JSC::GCAwareJITStubRoutineWithExceptionHandler::aboutToDie):
+ (JSC::GCAwareJITStubRoutineWithExceptionHandler::~GCAwareJITStubRoutineWithExceptionHandler):
+ (JSC::createJITStubRoutine):
+ * jit/GCAwareJITStubRoutine.h:
+ * jit/JITCode.cpp:
+ (JSC::NativeJITCode::addressForCall):
+ (JSC::JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite):
+ * jit/JITCode.h:
+ * jit/JITInlineCacheGenerator.cpp:
+ (JSC::JITByIdGenerator::JITByIdGenerator):
+ (JSC::JITGetByIdGenerator::JITGetByIdGenerator):
+ (JSC::JITPutByIdGenerator::JITPutByIdGenerator):
+ * jit/JITInlineCacheGenerator.h:
+ (JSC::JITByIdGenerator::reportSlowPathCall):
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::emitGetByValWithCachedId):
+ (JSC::JIT::emitPutByValWithCachedId):
+ (JSC::JIT::emit_op_get_by_id):
+ (JSC::JIT::emit_op_put_by_id):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::emitGetByValWithCachedId):
+ (JSC::JIT::emitPutByValWithCachedId):
+ (JSC::JIT::emit_op_get_by_id):
+ (JSC::JIT::emit_op_put_by_id):
+ * jit/JITStubRoutine.h:
+ (JSC::JITStubRoutine::createSelfManagedRoutine):
+ (JSC::JITStubRoutine::aboutToDie):
+ * jit/RegisterSet.cpp:
+ (JSC::RegisterSet::webAssemblyCalleeSaveRegisters):
+ (JSC::RegisterSet::registersToNotSaveForCall):
+ (JSC::RegisterSet::allGPRs):
+ * jit/RegisterSet.h:
+ (JSC::RegisterSet::set):
+ (JSC::RegisterSet::clear):
+ * jit/ScratchRegisterAllocator.cpp:
+ (JSC::ScratchRegisterAllocator::allocateScratchGPR):
+ (JSC::ScratchRegisterAllocator::allocateScratchFPR):
+ (JSC::ScratchRegisterAllocator::preserveReusedRegistersByPushing):
+ (JSC::ScratchRegisterAllocator::restoreReusedRegistersByPopping):
+ (JSC::ScratchRegisterAllocator::usedRegistersForCall):
+ (JSC::ScratchRegisterAllocator::preserveUsedRegistersToScratchBufferForCall):
+ (JSC::ScratchRegisterAllocator::restoreUsedRegistersFromScratchBufferForCall):
+ (JSC::ScratchRegisterAllocator::preserveRegistersToStackForCall):
+ (JSC::ScratchRegisterAllocator::restoreRegistersFromStackForCall):
+ * jit/ScratchRegisterAllocator.h:
+ (JSC::ScratchRegisterAllocator::numberOfReusedRegisters):
+ (JSC::ScratchRegisterAllocator::usedRegisters):
+ * jsc.cpp:
+ (WTF::CustomGetter::CustomGetter):
+ (WTF::CustomGetter::createStructure):
+ (WTF::CustomGetter::create):
+ (WTF::CustomGetter::getOwnPropertySlot):
+ (WTF::CustomGetter::customGetter):
+ (WTF::Element::handleOwner):
+ (GlobalObject::finishCreation):
+ (functionCreateImpureGetter):
+ (functionCreateCustomGetterObject):
+ (functionSetImpureGetterDelegate):
+ * tests/stress/try-catch-custom-getter-as-get-by-id.js: Added.
+ (assert):
+ (bar):
+ (foo):
+ * tests/stress/try-catch-getter-as-get-by-id-register-restoration.js: Added.
+ (assert):
+ (o1.get f):
+ (bar):
+ (foo):
+ * tests/stress/try-catch-getter-as-get-by-id.js: Added.
+ (assert):
+ (o1.get f):
+ (bar):
+ (foo):
+ * tests/stress/try-catch-setter-as-put-by-id.js: Added.
+ (assert):
+ (o1.set f):
+ (bar):
+ (foo):
+ * tests/stress/try-catch-stub-routine-replaced.js: Added.
+ (assert):
+ (arr):
+ (hello):
+ (foo):
+ (objChain.get f):
+ (fakeOut.get f):
+ (o.get f):
+
2015-10-08 Commit Queue <commit-queue@webkit.org>
Unreviewed, rolling out r190716.
// destructors.
#if ENABLE(JIT)
- for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
- (*iter)->deref();
+ for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
+ StructureStubInfo* stub = *iter;
+ stub->aboutToDie();
+ stub->deref();
+ }
#endif // ENABLE(JIT)
}
return 0;
}
+CallSiteIndex CodeBlock::newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite)
+{
+#if ENABLE(DFG_JIT)
+ RELEASE_ASSERT(jitType() == JITCode::DFGJIT); // FIXME: When implementing FTL try/catch we should include that JITType here as well: https://bugs.webkit.org/show_bug.cgi?id=149409
+ RELEASE_ASSERT(canGetCodeOrigin(originalCallSite));
+ ASSERT(!!handlerForIndex(originalCallSite.bits()));
+ CodeOrigin originalOrigin = codeOrigin(originalCallSite);
+ return m_jitCode->dfgCommon()->addCodeOriginUnconditionally(originalOrigin);
+#else
+ // We never create new on-the-fly exception handling
+ // call sites outside the DFG/FTL inline caches.
+ RELEASE_ASSERT_NOT_REACHED();
+ return CallSiteIndex(0);
+#endif
+}
+
+void CodeBlock::removeExceptionHandlerForCallSite(CallSiteIndex callSiteIndex)
+{
+ RELEASE_ASSERT(m_rareData);
+ Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
+ unsigned index = callSiteIndex.bits();
+ for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
+ HandlerInfo& handler = exceptionHandlers[i];
+ if (handler.start <= index && handler.end > index) {
+ exceptionHandlers.remove(i);
+ return;
+ }
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
{
RELEASE_ASSERT(bytecodeOffset < instructions().size());
};
HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset, RequiredHandler = RequiredHandler::AnyHandler);
HandlerInfo* handlerForIndex(unsigned, RequiredHandler = RequiredHandler::AnyHandler);
+ void removeExceptionHandlerForCallSite(CallSiteIndex);
unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
m_rareData->m_exceptionHandlers.append(handler);
}
+ CallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite);
+
protected:
void finalizeLLIntInlineCaches();
void finalizeBaselineJITInlineCaches();
#include "JITOperations.h"
#include "JSCInlines.h"
#include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
#include "ScratchRegisterAllocator.h"
#include "StructureStubClearingWatchpoint.h"
#include "StructureStubInfo.h"
static const bool verbose = false;
struct AccessGenerationState {
+ AccessGenerationState()
+ : m_calculatedRegistersForCallAndExceptionHandling(false)
+ , m_needsToRestoreRegistersIfException(false)
+ , m_calculatedCallSiteIndex(false)
+ {
+ }
CCallHelpers* jit { nullptr };
ScratchRegisterAllocator* allocator;
- size_t numberOfPaddingBytes { 0 };
+ unsigned numberOfBytesUsedToPreserveReusedRegisters { 0 };
PolymorphicAccess* access { nullptr };
StructureStubInfo* stubInfo { nullptr };
CCallHelpers::JumpList success;
void restoreScratch()
{
- allocator->restoreReusedRegistersByPopping(*jit, numberOfPaddingBytes);
+ allocator->restoreReusedRegistersByPopping(*jit, numberOfBytesUsedToPreserveReusedRegisters);
}
void succeed()
restoreScratch();
success.append(jit->jump());
}
+
+ void calculateLiveRegistersForCallAndExceptionHandling()
+ {
+ if (!m_calculatedRegistersForCallAndExceptionHandling) {
+ m_calculatedRegistersForCallAndExceptionHandling = true;
+
+ m_liveRegistersToPreserveAtExceptionHandlingCallSite = jit->codeBlock()->jitCode()->liveRegistersToPreserveAtExceptionHandlingCallSite(jit->codeBlock(), stubInfo->callSiteIndex);
+ m_needsToRestoreRegistersIfException = m_liveRegistersToPreserveAtExceptionHandlingCallSite.numberOfSetRegisters() > 0;
+ if (m_needsToRestoreRegistersIfException)
+ RELEASE_ASSERT(JITCode::isOptimizingJIT(jit->codeBlock()->jitType()));
+
+ m_liveRegistersForCall = RegisterSet(m_liveRegistersToPreserveAtExceptionHandlingCallSite, allocator->usedRegisters());
+ m_liveRegistersForCall.exclude(RegisterSet::registersToNotSaveForCall());
+ }
+ }
+
+ void preserveLiveRegistersToStackForCall()
+ {
+ unsigned extraStackPadding = 0;
+ unsigned numberOfStackBytesUsedForRegisterPreservation = ScratchRegisterAllocator::preserveRegistersToStackForCall(*jit, liveRegistersForCall(), extraStackPadding);
+ if (m_numberOfStackBytesUsedForRegisterPreservation != std::numeric_limits<unsigned>::max())
+ RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == m_numberOfStackBytesUsedForRegisterPreservation);
+ m_numberOfStackBytesUsedForRegisterPreservation = numberOfStackBytesUsedForRegisterPreservation;
+ }
+
+ void restoreLiveRegistersFromStackForCall(bool isGetter)
+ {
+ RegisterSet dontRestore;
+ if (isGetter) {
+ // This is the result value. We don't want to overwrite the result with what we stored to the stack.
+ // We sometimes have to store it to the stack just in case we throw an exception and need the original value.
+ dontRestore.set(valueRegs);
+ }
+ restoreLiveRegistersFromStackForCall(dontRestore);
+ }
+
+ void restoreLiveRegistersFromStackForCallWithThrownException()
+ {
+ // Even if we're a getter, we don't want to ignore the result value like we normally do
+ // because the getter threw, and therefore, didn't return a value that means anything.
+ // Instead, we want to restore that register to what it was upon entering the getter
+ // inline cache. The subtlety here is if the base and the result are the same register,
+ // and the getter threw, we want OSR exit to see the original base value, not the result
+ // of the getter call.
+ RegisterSet dontRestore = liveRegistersForCall();
+ // As an optimization here, we only need to restore what is live for exception handling.
+ // We can construct the dontRestore set to accomplish this goal by having it contain only
+ // what is live for call but not live for exception handling. By ignoring things that are
+ // only live at the call but not the exception handler, we will only restore things live
+ // at the exception handler.
+ dontRestore.exclude(liveRegistersToPreserveAtExceptionHandlingCallSite());
+ restoreLiveRegistersFromStackForCall(dontRestore);
+ }
+
+ void restoreLiveRegistersFromStackForCall(const RegisterSet& dontRestore)
+ {
+ unsigned extraStackPadding = 0;
+ ScratchRegisterAllocator::restoreRegistersFromStackForCall(*jit, liveRegistersForCall(), dontRestore, m_numberOfStackBytesUsedForRegisterPreservation, extraStackPadding);
+ }
+
+ const RegisterSet& liveRegistersForCall()
+ {
+ RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
+ return m_liveRegistersForCall;
+ }
+
+ CallSiteIndex callSiteIndexForExceptionHandlingOrOriginal()
+ {
+ RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
+
+ if (!m_calculatedCallSiteIndex) {
+ m_calculatedCallSiteIndex = true;
+
+ if (m_needsToRestoreRegistersIfException)
+ m_callSiteIndex = jit->codeBlock()->newExceptionHandlingCallSiteIndex(stubInfo->callSiteIndex);
+ else
+ m_callSiteIndex = originalCallSiteIndex();
+ }
+
+ return m_callSiteIndex;
+ }
+
+ CallSiteIndex callSiteIndexForExceptionHandling()
+ {
+ RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
+ RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
+ RELEASE_ASSERT(m_calculatedCallSiteIndex);
+ return m_callSiteIndex;
+ }
+
+ const HandlerInfo& originalExceptionHandler() const
+ {
+ RELEASE_ASSERT(m_needsToRestoreRegistersIfException);
+ HandlerInfo* exceptionHandler = jit->codeBlock()->handlerForIndex(stubInfo->callSiteIndex.bits());
+ RELEASE_ASSERT(exceptionHandler);
+ return *exceptionHandler;
+ }
+
+ unsigned numberOfStackBytesUsedForRegisterPreservation() const
+ {
+ RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
+ return m_numberOfStackBytesUsedForRegisterPreservation;
+ }
+
+ bool needsToRestoreRegistersIfException() const { return m_needsToRestoreRegistersIfException; }
+ CallSiteIndex originalCallSiteIndex() const { return stubInfo->callSiteIndex; }
+
+private:
+ const RegisterSet& liveRegistersToPreserveAtExceptionHandlingCallSite()
+ {
+ RELEASE_ASSERT(m_calculatedRegistersForCallAndExceptionHandling);
+ return m_liveRegistersToPreserveAtExceptionHandlingCallSite;
+ }
+
+ RegisterSet m_liveRegistersToPreserveAtExceptionHandlingCallSite;
+ RegisterSet m_liveRegistersForCall;
+ CallSiteIndex m_callSiteIndex { CallSiteIndex(std::numeric_limits<unsigned>::max()) };
+ unsigned m_numberOfStackBytesUsedForRegisterPreservation { std::numeric_limits<unsigned>::max() };
+ bool m_calculatedRegistersForCallAndExceptionHandling : 1;
+ bool m_needsToRestoreRegistersIfException : 1;
+ bool m_calculatedCallSiteIndex : 1;
};
AccessCase::AccessCase()
#endif
}
- // Stuff for custom getters.
+ if (m_type == Load) {
+ state.succeed();
+ return;
+ }
+
+ // Stuff for custom getters/setters.
CCallHelpers::Call operationCall;
- CCallHelpers::Call handlerCall;
+ CCallHelpers::Call lookupExceptionHandlerCall;
- // Stuff for JS getters.
+ // Stuff for JS getters/setters.
CCallHelpers::DataLabelPtr addressOfLinkFunctionCheck;
CCallHelpers::Call fastPathCall;
CCallHelpers::Call slowPathCall;
CCallHelpers::Jump success;
CCallHelpers::Jump fail;
- if (m_type != Load && m_type != Miss) {
- // Need to make sure that whenever this call is made in the future, we remember the
- // place that we made it from.
- jit.store32(
- CCallHelpers::TrustedImm32(stubInfo.callSiteIndex.bits()),
- CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
-
- if (m_type == Getter || m_type == Setter) {
- // Create a JS call using a JS call inline cache. Assume that:
- //
- // - SP is aligned and represents the extent of the calling compiler's stack usage.
- //
- // - FP is set correctly (i.e. it points to the caller's call frame header).
- //
- // - SP - FP is an aligned difference.
- //
- // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
- // code.
- //
- // Therefore, we temporarily grow the stack for the purpose of the call and then
- // shrink it after.
-
- RELEASE_ASSERT(!m_rareData->callLinkInfo);
- m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();
-
- // FIXME: If we generated a polymorphic call stub that jumped back to the getter
- // stub, which then jumped back to the main code, then we'd have a reachability
- // situation that the GC doesn't know about. The GC would ensure that the polymorphic
- // call stub stayed alive, and it would ensure that the main code stayed alive, but
- // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
- // be GC objects, and then we'd be able to say that the polymorphic call stub has a
- // reference to the getter stub.
- // https://bugs.webkit.org/show_bug.cgi?id=148914
- m_rareData->callLinkInfo->disallowStubs();
-
- m_rareData->callLinkInfo->setUpCall(
- CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
-
- CCallHelpers::JumpList done;
-
- // There is a "this" argument.
- unsigned numberOfParameters = 1;
- // ... and a value argument if we're calling a setter.
- if (m_type == Setter)
- numberOfParameters++;
-
- // Get the accessor; if there ain't one then the result is jsUndefined().
- if (m_type == Setter) {
- jit.loadPtr(
- CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
- loadedValueGPR);
- } else {
- jit.loadPtr(
- CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
- loadedValueGPR);
- }
- CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
- CCallHelpers::Zero, loadedValueGPR);
+ // This also does the necessary calculations of whether or not we're an
+ // exception handling call site.
+ state.calculateLiveRegistersForCallAndExceptionHandling();
+ state.preserveLiveRegistersToStackForCall();
- unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters;
+ // Need to make sure that whenever this call is made in the future, we remember the
+ // place that we made it from.
+ jit.store32(
+ CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
+ CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
+
+ if (m_type == Getter || m_type == Setter) {
+ // Create a JS call using a JS call inline cache. Assume that:
+ //
+ // - SP is aligned and represents the extent of the calling compiler's stack usage.
+ //
+ // - FP is set correctly (i.e. it points to the caller's call frame header).
+ //
+ // - SP - FP is an aligned difference.
+ //
+ // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling
+ // code.
+ //
+ // Therefore, we temporarily grow the stack for the purpose of the call and then
+ // shrink it after.
+
+ RELEASE_ASSERT(!m_rareData->callLinkInfo);
+ m_rareData->callLinkInfo = std::make_unique<CallLinkInfo>();
+
+ // FIXME: If we generated a polymorphic call stub that jumped back to the getter
+ // stub, which then jumped back to the main code, then we'd have a reachability
+ // situation that the GC doesn't know about. The GC would ensure that the polymorphic
+ // call stub stayed alive, and it would ensure that the main code stayed alive, but
+ // it wouldn't know that the getter stub was alive. Ideally JIT stub routines would
+ // be GC objects, and then we'd be able to say that the polymorphic call stub has a
+ // reference to the getter stub.
+ // https://bugs.webkit.org/show_bug.cgi?id=148914
+ m_rareData->callLinkInfo->disallowStubs();
+
+ m_rareData->callLinkInfo->setUpCall(
+ CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR);
- unsigned numberOfBytesForCall =
- numberOfRegsForCall * sizeof(Register) + sizeof(CallerFrameAndPC);
+ CCallHelpers::JumpList done;
- unsigned alignedNumberOfBytesForCall =
- WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
+ // There is a "this" argument.
+ unsigned numberOfParameters = 1;
+ // ... and a value argument if we're calling a setter.
+ if (m_type == Setter)
+ numberOfParameters++;
- jit.subPtr(
- CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
- CCallHelpers::stackPointerRegister);
+ // Get the accessor; if there ain't one then the result is jsUndefined().
+ if (m_type == Setter) {
+ jit.loadPtr(
+ CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfSetter()),
+ loadedValueGPR);
+ } else {
+ jit.loadPtr(
+ CCallHelpers::Address(loadedValueGPR, GetterSetter::offsetOfGetter()),
+ loadedValueGPR);
+ }
- CCallHelpers::Address calleeFrame = CCallHelpers::Address(
- CCallHelpers::stackPointerRegister,
- -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
+ CCallHelpers::Jump returnUndefined = jit.branchTestPtr(
+ CCallHelpers::Zero, loadedValueGPR);
- jit.store32(
- CCallHelpers::TrustedImm32(numberOfParameters),
- calleeFrame.withOffset(JSStack::ArgumentCount * sizeof(Register) + PayloadOffset));
+ unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters;
- jit.storeCell(
- loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register)));
+ unsigned numberOfBytesForCall =
+ numberOfRegsForCall * sizeof(Register) + sizeof(CallerFrameAndPC);
- jit.storeCell(
- baseForGetGPR,
- calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
+ unsigned alignedNumberOfBytesForCall =
+ WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);
- if (m_type == Setter) {
- jit.storeValue(
- valueRegs,
- calleeFrame.withOffset(
- virtualRegisterForArgument(1).offset() * sizeof(Register)));
- }
+ jit.subPtr(
+ CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
+ CCallHelpers::stackPointerRegister);
- CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
- CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
- CCallHelpers::TrustedImmPtr(0));
+ CCallHelpers::Address calleeFrame = CCallHelpers::Address(
+ CCallHelpers::stackPointerRegister,
+ -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC)));
- fastPathCall = jit.nearCall();
+ jit.store32(
+ CCallHelpers::TrustedImm32(numberOfParameters),
+ calleeFrame.withOffset(JSStack::ArgumentCount * sizeof(Register) + PayloadOffset));
- jit.addPtr(
- CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
- CCallHelpers::stackPointerRegister);
- if (m_type == Getter)
- jit.setupResults(valueRegs);
+ jit.storeCell(
+ loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register)));
- done.append(jit.jump());
- slowCase.link(&jit);
+ jit.storeCell(
+ baseForGetGPR,
+ calleeFrame.withOffset(virtualRegisterForArgument(0).offset() * sizeof(Register)));
- jit.move(loadedValueGPR, GPRInfo::regT0);
+ if (m_type == Setter) {
+ jit.storeValue(
+ valueRegs,
+ calleeFrame.withOffset(
+ virtualRegisterForArgument(1).offset() * sizeof(Register)));
+ }
+
+ CCallHelpers::Jump slowCase = jit.branchPtrWithPatch(
+ CCallHelpers::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck,
+ CCallHelpers::TrustedImmPtr(0));
+
+ fastPathCall = jit.nearCall();
+ if (m_type == Getter)
+ jit.setupResults(valueRegs);
+ done.append(jit.jump());
+
+ slowCase.link(&jit);
+ jit.move(loadedValueGPR, GPRInfo::regT0);
#if USE(JSVALUE32_64)
- // We *always* know that the getter/setter, if non-null, is a cell.
- jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
+ // We *always* know that the getter/setter, if non-null, is a cell.
+ jit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
#endif
- jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
- slowPathCall = jit.nearCall();
-
- jit.addPtr(
- CCallHelpers::TrustedImm32(alignedNumberOfBytesForCall),
- CCallHelpers::stackPointerRegister);
- if (m_type == Getter)
- jit.setupResults(valueRegs);
+ jit.move(CCallHelpers::TrustedImmPtr(m_rareData->callLinkInfo.get()), GPRInfo::regT2);
+ slowPathCall = jit.nearCall();
+ if (m_type == Getter)
+ jit.setupResults(valueRegs);
+ done.append(jit.jump());
- done.append(jit.jump());
- returnUndefined.link(&jit);
+ returnUndefined.link(&jit);
+ if (m_type == Getter)
+ jit.moveTrustedValue(jsUndefined(), valueRegs);
- if (m_type == Getter)
- jit.moveTrustedValue(jsUndefined(), valueRegs);
+ done.link(&jit);
- done.link(&jit);
+ jit.addPtr(CCallHelpers::TrustedImm32((jit.codeBlock()->stackPointerOffset() * sizeof(Register)) - state.numberOfBytesUsedToPreserveReusedRegisters - state.numberOfStackBytesUsedForRegisterPreservation()),
+ GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+ state.restoreLiveRegistersFromStackForCall(isGetter());
- jit.addPtr(
- CCallHelpers::TrustedImm32(
- jit.codeBlock()->stackPointerOffset() * sizeof(Register)),
- GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+ state.callbacks.append(
+ [=, &vm] (LinkBuffer& linkBuffer) {
+ m_rareData->callLinkInfo->setCallLocations(
+ linkBuffer.locationOfNearCall(slowPathCall),
+ linkBuffer.locationOf(addressOfLinkFunctionCheck),
+ linkBuffer.locationOfNearCall(fastPathCall));
+
+ linkBuffer.link(
+ slowPathCall,
+ CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
+ });
+ } else {
+ unsigned stackOffset = 0;
+ // Need to make room for the C call so our spillage isn't overwritten.
+ if (state.numberOfStackBytesUsedForRegisterPreservation()) {
+ if (maxFrameExtentForSlowPathCall)
+ stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall);
+ }
+ if (stackOffset) {
+ jit.subPtr(
+ CCallHelpers::TrustedImm32(stackOffset),
+ CCallHelpers::stackPointerRegister);
+ }
- state.callbacks.append(
- [=, &vm] (LinkBuffer& linkBuffer) {
- m_rareData->callLinkInfo->setCallLocations(
- linkBuffer.locationOfNearCall(slowPathCall),
- linkBuffer.locationOf(addressOfLinkFunctionCheck),
- linkBuffer.locationOfNearCall(fastPathCall));
-
- linkBuffer.link(
- slowPathCall,
- CodeLocationLabel(vm.getCTIStub(linkCallThunkGenerator).code()));
- });
- } else {
- // getter: EncodedJSValue (*GetValueFunc)(ExecState*, JSObject* slotBase, EncodedJSValue thisValue, PropertyName);
- // setter: void (*PutValueFunc)(ExecState*, JSObject* base, EncodedJSValue thisObject, EncodedJSValue value);
+ // getter: EncodedJSValue (*GetValueFunc)(ExecState*, JSObject* slotBase, EncodedJSValue thisValue, PropertyName);
+ // setter: void (*PutValueFunc)(ExecState*, JSObject* base, EncodedJSValue thisObject, EncodedJSValue value);
#if USE(JSVALUE64)
- if (m_type == CustomGetter) {
- jit.setupArgumentsWithExecState(
- baseForAccessGPR, baseForGetGPR,
- CCallHelpers::TrustedImmPtr(ident.impl()));
- } else
- jit.setupArgumentsWithExecState(baseForAccessGPR, baseForGetGPR, valueRegs.gpr());
+ if (m_type == CustomGetter) {
+ jit.setupArgumentsWithExecState(
+ baseForAccessGPR, baseForGetGPR,
+ CCallHelpers::TrustedImmPtr(ident.impl()));
+ } else
+ jit.setupArgumentsWithExecState(baseForAccessGPR, baseForGetGPR, valueRegs.gpr());
#else
- if (m_type == CustomGetter) {
- jit.setupArgumentsWithExecState(
- baseForAccessGPR, baseForGetGPR,
- CCallHelpers::TrustedImm32(JSValue::CellTag),
- CCallHelpers::TrustedImmPtr(ident.impl()));
- } else {
- jit.setupArgumentsWithExecState(
- baseForAccessGPR, baseForGetGPR,
- CCallHelpers::TrustedImm32(JSValue::CellTag),
- valueRegs.payloadGPR(), valueRegs.tagGPR());
- }
+ if (m_type == CustomGetter) {
+ jit.setupArgumentsWithExecState(
+ baseForAccessGPR, baseForGetGPR,
+ CCallHelpers::TrustedImm32(JSValue::CellTag),
+ CCallHelpers::TrustedImmPtr(ident.impl()));
+ } else {
+ jit.setupArgumentsWithExecState(
+ baseForAccessGPR, baseForGetGPR,
+ CCallHelpers::TrustedImm32(JSValue::CellTag),
+ valueRegs.payloadGPR(), valueRegs.tagGPR());
+ }
#endif
- jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
+ jit.storePtr(GPRInfo::callFrameRegister, &vm.topCallFrame);
- operationCall = jit.call();
- if (m_type == CustomGetter)
- jit.setupResults(valueRegs);
- CCallHelpers::Jump noException =
- jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+ operationCall = jit.call();
+ if (m_type == CustomGetter)
+ jit.setupResults(valueRegs);
- jit.copyCalleeSavesToVMCalleeSavesBuffer();
- jit.setupArguments(CCallHelpers::TrustedImmPtr(&vm), GPRInfo::callFrameRegister);
- handlerCall = jit.call();
- jit.jumpToExceptionHandler();
-
- noException.link(&jit);
+ if (stackOffset) {
+ jit.addPtr(
+ CCallHelpers::TrustedImm32(stackOffset),
+ CCallHelpers::stackPointerRegister);
+ }
+ CCallHelpers::Jump noException =
+ jit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+ bool didSetLookupExceptionHandler = false;
+ state.restoreLiveRegistersFromStackForCallWithThrownException();
+ state.restoreScratch();
+ jit.copyCalleeSavesToVMCalleeSavesBuffer();
+ if (state.needsToRestoreRegistersIfException()) {
+ // The JIT that produced the original exception handling call site
+ // expects the OSR exit to be arrived at from genericUnwind. Therefore
+ // we must model what genericUnwind does here, i.e., set
+ // callFrameForCatch and copy the callee saves.
+
+ jit.storePtr(GPRInfo::callFrameRegister, vm.addressOfCallFrameForCatch());
+ CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
+
+ // We don't need to insert a new exception handler in the table
+ // because we're doing a manual exception check here, i.e., we'll
+ // never arrive here from genericUnwind().
+ HandlerInfo originalHandler = state.originalExceptionHandler();
state.callbacks.append(
[=] (LinkBuffer& linkBuffer) {
- linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
- linkBuffer.link(handlerCall, lookupExceptionHandler);
+ linkBuffer.link(jumpToOSRExitExceptionHandler, originalHandler.nativeCode);
});
+ } else {
+ jit.setupArguments(CCallHelpers::TrustedImmPtr(&vm), GPRInfo::callFrameRegister);
+ lookupExceptionHandlerCall = jit.call();
+ didSetLookupExceptionHandler = true;
+ jit.jumpToExceptionHandler();
}
+
+ noException.link(&jit);
+ state.restoreLiveRegistersFromStackForCall(isGetter());
+
+ state.callbacks.append(
+ [=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(operationCall, FunctionPtr(m_rareData->customAccessor.opaque));
+ if (didSetLookupExceptionHandler)
+ linkBuffer.link(lookupExceptionHandlerCall, lookupExceptionHandler);
+ });
}
state.succeed();
return;
else
scratchGPR3 = InvalidGPRReg;
- size_t numberOfPaddingBytes = allocator.preserveReusedRegistersByPushing(jit);
+ size_t numberOfBytesUsedToPreserveReusedRegisters = allocator.preserveReusedRegistersByPushing(jit);
ASSERT(structure()->transitionWatchpointSetHasBeenInvalidated());
});
}
- allocator.restoreReusedRegistersByPopping(jit, numberOfPaddingBytes);
+ allocator.restoreReusedRegistersByPopping(jit, numberOfBytesUsedToPreserveReusedRegisters);
state.succeed();
if (newStructure()->outOfLineCapacity() != structure()->outOfLineCapacity()) {
slowPath.link(&jit);
- allocator.restoreReusedRegistersByPopping(jit, numberOfPaddingBytes);
+ allocator.restoreReusedRegistersByPopping(jit, numberOfBytesUsedToPreserveReusedRegisters);
allocator.preserveUsedRegistersToScratchBufferForCall(jit, scratchBuffer, scratchGPR);
#if USE(JSVALUE64)
jit.setupArgumentsWithExecState(
if (found)
continue;
- if (myCase->doesCalls() && stubInfo.patch.spillMode == NeedToSpill)
- return MacroAssemblerCodePtr();
-
casesToAdd.append(WTF::move(myCase));
}
CCallHelpers jit(&vm, codeBlock);
state.jit = &jit;
- state.numberOfPaddingBytes = allocator.preserveReusedRegistersByPushing(jit);
+ state.numberOfBytesUsedToPreserveReusedRegisters = allocator.preserveReusedRegistersByPushing(jit);
bool allGuardedByStructureCheck = true;
- for (auto& entry : cases)
+ bool hasJSGetterSetterCall = false;
+ for (auto& entry : cases) {
allGuardedByStructureCheck &= entry->guardedByStructureCheck();
+ if (entry->type() == AccessCase::Getter || entry->type() == AccessCase::Setter)
+ hasJSGetterSetterCall = true;
+ }
if (cases.isEmpty()) {
// This is super unlikely, but we make it legal anyway.
failure = state.failAndRepatch;
failure.append(jit.jump());
+ if (state.needsToRestoreRegistersIfException() && hasJSGetterSetterCall) {
+ // Emit the exception handler.
+ // Note that this code is only reachable when doing genericUnwind from a pure JS getter/setter.
+ // Note also that this is not reachable from a custom getter/setter. Custom getters/setters will have
+ // their own exception handling logic that doesn't go through genericUnwind.
+ MacroAssembler::Label makeshiftCatchHandler = jit.label();
+
+ int stackPointerOffset = codeBlock->stackPointerOffset() * sizeof(EncodedJSValue);
+ stackPointerOffset -= state.numberOfBytesUsedToPreserveReusedRegisters;
+ stackPointerOffset -= state.numberOfStackBytesUsedForRegisterPreservation();
+
+ jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
+ jit.addPtr(CCallHelpers::TrustedImm32(stackPointerOffset), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+
+ state.restoreLiveRegistersFromStackForCallWithThrownException();
+ state.restoreScratch();
+ CCallHelpers::Jump jumpToOSRExitExceptionHandler = jit.jump();
+
+ HandlerInfo oldHandler = state.originalExceptionHandler();
+ CallSiteIndex newExceptionHandlingCallSite = state.callSiteIndexForExceptionHandling();
+ state.callbacks.append(
+ [=] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(jumpToOSRExitExceptionHandler, oldHandler.nativeCode);
+
+ HandlerInfo handlerToRegister = oldHandler;
+ handlerToRegister.nativeCode = linkBuffer.locationOf(makeshiftCatchHandler);
+ handlerToRegister.start = newExceptionHandlingCallSite.bits();
+ handlerToRegister.end = newExceptionHandlingCallSite.bits() + 1;
+ codeBlock->appendExceptionHandler(handlerToRegister);
+ });
+ }
+
LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
if (linkBuffer.didFailToAllocate()) {
if (verbose)
if (verbose)
dataLog(*codeBlock, " ", stubInfo.codeOrigin, ": Generating polymorphic access stub for ", listDump(cases), "\n");
-
+
MacroAssemblerCodeRef code = FINALIZE_CODE_FOR(
codeBlock, linkBuffer,
("%s", toCString("Access stub for ", *codeBlock, " ", stubInfo.codeOrigin, " with return point ", successLabel, ": ", listDump(cases)).data()));
for (auto& entry : cases)
doesCalls |= entry->doesCalls();
- m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls);
+ CodeBlock* codeBlockThatOwnsExceptionHandlers = nullptr;
+ CallSiteIndex callSiteIndexForExceptionHandling = state.originalCallSiteIndex();
+ if (state.needsToRestoreRegistersIfException()) {
+ codeBlockThatOwnsExceptionHandlers = codeBlock;
+ ASSERT(JITCode::isOptimizingJIT(codeBlockThatOwnsExceptionHandlers->jitType()));
+ callSiteIndexForExceptionHandling = state.callSiteIndexForExceptionHandling();
+ }
+
+ m_stubRoutine = createJITStubRoutine(code, vm, codeBlock, doesCalls, nullptr, codeBlockThatOwnsExceptionHandlers, callSiteIndexForExceptionHandling);
m_watchpoints = WTF::move(state.watchpoints);
if (!state.weakReferences.isEmpty())
m_weakReferences = std::make_unique<Vector<WriteBarrier<JSCell>>>(WTF::move(state.weakReferences));
return code.code();
}
+void PolymorphicAccess::aboutToDie()
+{
+ m_stubRoutine->aboutToDie();
+}
+
} // namespace JSC
namespace WTF {
}
}
+ bool isGetter() const
+ {
+ switch (type()) {
+ case Getter:
+ case CustomGetter:
+ return true;
+ default:
+ return false;
+ }
+ }
+
CallLinkInfo* callLinkInfo() const
{
if (!m_rareData)
// If this returns false then we are requesting a reset of the owning StructureStubInfo.
bool visitWeak(VM&) const;
+ void aboutToDie();
+
void dump(PrintStream& out) const;
private:
RELEASE_ASSERT_NOT_REACHED();
}
+void StructureStubInfo::aboutToDie()
+{
+ switch (cacheType) {
+ case CacheType::Stub:
+ u.stub->aboutToDie();
+ return;
+ case CacheType::Unset:
+ case CacheType::GetByIdSelf:
+ case CacheType::PutByIdReplace:
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
MacroAssemblerCodePtr StructureStubInfo::addAccessCase(
CodeBlock* codeBlock, const Identifier& ident, std::unique_ptr<AccessCase> accessCase)
{
#include "Options.h"
#include "PolymorphicAccess.h"
#include "RegisterSet.h"
-#include "SpillRegistersMode.h"
#include "Structure.h"
#include "StructureStubClearingWatchpoint.h"
void reset(CodeBlock*);
void deref();
+ void aboutToDie();
// Check if the stub has weak references that are dead. If it does, then it resets itself,
// either entirely or just enough to ensure that those dead pointers don't get used anymore.
} u;
struct {
- unsigned spillMode : 8;
int8_t baseGPR;
#if USE(JSVALUE32_64)
int8_t valueTagGPR;
{
return isInGPR();
}
-#endif
+#endif // USE(JSVALUE32_64)
MacroAssembler::FPRegisterID fpr() const
{
return CallSiteIndex(index);
}
+CallSiteIndex CommonData::addCodeOriginUnconditionally(CodeOrigin codeOrigin)
+{
+ if (callSiteIndexFreeList.size())
+ return CallSiteIndex(callSiteIndexFreeList.takeAny());
+
+ codeOrigins.append(codeOrigin);
+ unsigned index = codeOrigins.size() - 1;
+ ASSERT(codeOrigins[index] == codeOrigin);
+ return CallSiteIndex(index);
+}
+
CallSiteIndex CommonData::lastCallSite() const
{
RELEASE_ASSERT(codeOrigins.size());
return CallSiteIndex(codeOrigins.size() - 1);
}
+void CommonData::removeCallSiteIndex(CallSiteIndex callSite)
+{
+ RELEASE_ASSERT(callSite.bits() < codeOrigins.size());
+ callSiteIndexFreeList.add(callSite.bits());
+}
+
void CommonData::shrinkToFit()
{
codeOrigins.shrinkToFit();
void notifyCompilingStructureTransition(Plan&, CodeBlock*, Node*);
CallSiteIndex addCodeOrigin(CodeOrigin);
+ CallSiteIndex addCodeOriginUnconditionally(CodeOrigin);
CallSiteIndex lastCallSite() const;
+ void removeCallSiteIndex(CallSiteIndex);
void shrinkToFit();
unsigned frameRegisterCount;
unsigned requiredRegisterCountForExit;
+
+private:
+ HashSet<unsigned, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> callSiteIndexFreeList;
+
};
} } // namespace JSC::DFG
result[i] = recoveries[i].recover(exec);
}
+RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock* codeBlock, CallSiteIndex callSiteIndex)
+{
+ for (OSRExit& exit : osrExit) {
+ if (exit.m_isExceptionHandler && exit.m_exceptionHandlerCallSiteIndex.bits() == callSiteIndex.bits()) {
+ Operands<ValueRecovery> valueRecoveries;
+ reconstruct(codeBlock, exit.m_codeOrigin, exit.m_streamIndex, valueRecoveries);
+ RegisterSet liveAtOSRExit;
+ for (size_t index = 0; index < valueRecoveries.size(); ++index) {
+ const ValueRecovery& recovery = valueRecoveries[index];
+ if (recovery.isInRegisters()) {
+ if (recovery.isInGPR())
+ liveAtOSRExit.set(recovery.gpr());
+ else if (recovery.isInFPR())
+ liveAtOSRExit.set(recovery.fpr());
+#if USE(JSVALUE32_64)
+ else if (recovery.isInJSValueRegs()) {
+ liveAtOSRExit.set(recovery.payloadGPR());
+ liveAtOSRExit.set(recovery.tagGPR());
+ }
+#endif
+ else
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ return liveAtOSRExit;
+ }
+ }
+
+ return RegisterSet();
+}
+
#if ENABLE(FTL_JIT)
bool JITCode::checkIfOptimizationThresholdReached(CodeBlock* codeBlock)
{
void shrinkToFit();
+ RegisterSet liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex) override;
#if ENABLE(FTL_JIT)
CodeBlock* osrEntryBlock() { return m_osrEntryBlock.get(); }
void setOSREntryBlock(VM& vm, const JSCell* owner, CodeBlock* osrEntryBlock) { m_osrEntryBlock.set(vm, owner, osrEntryBlock); }
exit.m_willArriveAtOSRExitFromGenericUnwind = jumpsToFail.empty(); // If jumps are empty, we're going to jump here from genericUnwind from a child call frame.
exit.m_isExceptionHandler = true;
exit.m_codeOrigin = opCatchOrigin;
+ exit.m_exceptionHandlerCallSiteIndex = callSite;
OSRExitCompilationInfo& exitInfo = appendExitInfo(jumpsToFail);
jitCode()->appendOSRExit(exit);
m_exceptionHandlerOSRExitCallSites.append(ExceptionHandlingOSRExitInfo { exitInfo, *exceptionHandler, callSite });
, m_patchableCodeOffset(0)
, m_recoveryIndex(recoveryIndex)
, m_streamIndex(streamIndex)
+ , m_exceptionHandlerCallSiteIndex(std::numeric_limits<unsigned>::max())
, m_willArriveAtOSRExitFromGenericUnwind(false)
, m_isExceptionHandler(false)
{
void correctJump(LinkBuffer&);
unsigned m_streamIndex;
+ CallSiteIndex m_exceptionHandlerCallSiteIndex;
bool m_willArriveAtOSRExitFromGenericUnwind : 1;
bool m_isExceptionHandler : 1;
stubInfo->patch.valueTagGPR = static_cast<int8_t>(InvalidGPRReg);
#endif
stubInfo->patch.usedRegisters = usedRegisters();
- stubInfo->patch.spillMode = NeedToSpill;
m_jit.addIn(InRecord(jump, done, slowPath.get(), stubInfo));
addSlowPathGenerator(WTF::move(slowPath));
basePayloadGPR = resultPayloadGPR;
}
+ RegisterSet usedRegisters = this->usedRegisters();
+ if (spillMode == DontSpill) {
+ // We've already flushed registers to the stack, we don't need to spill these.
+ usedRegisters.set(JSValueRegs(baseTagGPROrNone, basePayloadGPR), false);
+ usedRegisters.set(JSValueRegs(resultTagGPR, resultPayloadGPR), false);
+ }
+
CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
JITGetByIdGenerator gen(
- m_jit.codeBlock(), codeOrigin, callSite, usedRegisters(),
+ m_jit.codeBlock(), codeOrigin, callSite, usedRegisters,
JSValueRegs(baseTagGPROrNone, basePayloadGPR),
- JSValueRegs(resultTagGPR, resultPayloadGPR), spillMode);
+ JSValueRegs(resultTagGPR, resultPayloadGPR));
gen.generateFastPath(m_jit);
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
+ RegisterSet usedRegisters = this->usedRegisters();
+ if (spillMode == DontSpill) {
+ // We've already flushed registers to the stack, we don't need to spill these.
+ usedRegisters.set(basePayloadGPR, false);
+ usedRegisters.set(JSValueRegs(valueTagGPR, valuePayloadGPR), false);
+ }
CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
JITPutByIdGenerator gen(
- m_jit.codeBlock(), codeOrigin, callSite, usedRegisters(),
+ m_jit.codeBlock(), codeOrigin, callSite, usedRegisters,
JSValueRegs::payloadOnly(basePayloadGPR), JSValueRegs(valueTagGPR, valuePayloadGPR),
- scratchGPR, spillMode, m_jit.ecmaModeFor(codeOrigin), putKind);
+ scratchGPR, m_jit.ecmaModeFor(codeOrigin), putKind);
gen.generateFastPath(m_jit);
void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
-
+ RegisterSet usedRegisters = this->usedRegisters();
+ if (spillMode == DontSpill) {
+ // We've already flushed registers to the stack, we don't need to spill these.
+ usedRegisters.set(baseGPR, false);
+ usedRegisters.set(resultGPR, false);
+ }
JITGetByIdGenerator gen(
- m_jit.codeBlock(), codeOrigin, callSite, usedRegisters(), JSValueRegs(baseGPR),
- JSValueRegs(resultGPR), spillMode);
+ m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, JSValueRegs(baseGPR),
+ JSValueRegs(resultGPR));
gen.generateFastPath(m_jit);
JITCompiler::JumpList slowCases;
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
+ RegisterSet usedRegisters = this->usedRegisters();
+ if (spillMode == DontSpill) {
+ // We've already flushed registers to the stack, we don't need to spill these.
+ usedRegisters.set(baseGPR, false);
+ usedRegisters.set(valueGPR, false);
+ }
JITPutByIdGenerator gen(
- m_jit.codeBlock(), codeOrigin, callSite, usedRegisters(), JSValueRegs(baseGPR),
- JSValueRegs(valueGPR), scratchGPR, spillMode, m_jit.ecmaModeFor(codeOrigin), putKind);
+ m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, JSValueRegs(baseGPR),
+ JSValueRegs(valueGPR), scratchGPR, m_jit.ecmaModeFor(codeOrigin), putKind);
gen.generateFastPath(m_jit);
JITGetByIdGenerator gen(
codeBlock, codeOrigin, getById.callSiteIndex(), usedRegisters, JSValueRegs(base),
- JSValueRegs(result), NeedToSpill);
+ JSValueRegs(result));
MacroAssembler::Label begin = slowPathJIT.label();
JITPutByIdGenerator gen(
codeBlock, codeOrigin, putById.callSiteIndex(), usedRegisters, JSValueRegs(base),
- JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
- putById.ecmaMode(), putById.putKind());
+ JSValueRegs(value), GPRInfo::patchpointScratchRegister, putById.ecmaMode(), putById.putKind());
MacroAssembler::Label begin = slowPathJIT.label();
stubInfo->patch.baseGPR = static_cast<int8_t>(obj);
stubInfo->patch.valueGPR = static_cast<int8_t>(result);
stubInfo->patch.usedRegisters = usedRegisters;
- stubInfo->patch.spillMode = NeedToSpill;
MacroAssembler::Label begin = slowPathJIT.label();
exit.validateReferences(trackedReferences);
}
+RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex)
+{
+ // FIXME: implement this when FTL implements try/catch.
+ // https://bugs.webkit.org/show_bug.cgi?id=149409
+ return RegisterSet();
+}
+
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
void initializeAddressForCall(CodePtr);
void validateReferences(const TrackedReferences&) override;
+
+ RegisterSet liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex) override;
const Vector<RefPtr<ExecutableMemoryHandle>>& handles() const { return m_handles; }
const Vector<RefPtr<DataSection>>& dataSections() const { return m_dataSections; }
#if ENABLE(JIT)
+#include "CodeBlock.h"
+#include "DFGCommonData.h"
#include "Heap.h"
#include "VM.h"
#include "JSCInlines.h"
{
vm.heap.m_jitStubRoutines.add(this);
}
-
+
GCAwareJITStubRoutine::~GCAwareJITStubRoutine() { }
void GCAwareJITStubRoutine::observeZeroRefCount()
visitor.append(&m_object);
}
+
+GCAwareJITStubRoutineWithExceptionHandler::GCAwareJITStubRoutineWithExceptionHandler(
+ const MacroAssemblerCodeRef& code, VM& vm,
+ CodeBlock* codeBlockForExceptionHandlers, CallSiteIndex exceptionHandlerCallSiteIndex)
+ : GCAwareJITStubRoutine(code, vm)
+ , m_codeBlockWithExceptionHandler(codeBlockForExceptionHandlers)
+ , m_exceptionHandlerCallSiteIndex(exceptionHandlerCallSiteIndex)
+{
+ RELEASE_ASSERT(m_codeBlockWithExceptionHandler);
+}
+
+void GCAwareJITStubRoutineWithExceptionHandler::aboutToDie()
+{
+ m_codeBlockWithExceptionHandler = nullptr;
+}
+
+GCAwareJITStubRoutineWithExceptionHandler::~GCAwareJITStubRoutineWithExceptionHandler()
+{
+ if (m_codeBlockWithExceptionHandler) {
+ m_codeBlockWithExceptionHandler->jitCode()->dfgCommon()->removeCallSiteIndex(m_exceptionHandlerCallSiteIndex);
+ m_codeBlockWithExceptionHandler->removeExceptionHandlerForCallSite(m_exceptionHandlerCallSiteIndex);
+ }
+}
+
+
PassRefPtr<JITStubRoutine> createJITStubRoutine(
const MacroAssemblerCodeRef& code,
VM& vm,
const JSCell* owner,
bool makesCalls,
- JSCell* object)
+ JSCell* object,
+ CodeBlock* codeBlockForExceptionHandlers,
+ CallSiteIndex exceptionHandlerCallSiteIndex)
{
if (!makesCalls)
return adoptRef(new JITStubRoutine(code));
+ if (codeBlockForExceptionHandlers) {
+ RELEASE_ASSERT(!object); // We're not a marking stub routine.
+ RELEASE_ASSERT(JITCode::isOptimizingJIT(codeBlockForExceptionHandlers->jitType()));
+ return static_pointer_cast<JITStubRoutine>(
+ adoptRef(new GCAwareJITStubRoutineWithExceptionHandler(code, vm, codeBlockForExceptionHandlers, exceptionHandlerCallSiteIndex)));
+ }
+
if (!object) {
return static_pointer_cast<JITStubRoutine>(
adoptRef(new GCAwareJITStubRoutine(code, vm)));
WriteBarrier<JSCell> m_object;
};
+
+// The stub has exception handlers in it. So it clears itself from the exception
+// handling table when it dies. It also frees its slot in the CodeOrigin table so
+// that new exception handlers can reuse the same CallSiteIndex.
+class GCAwareJITStubRoutineWithExceptionHandler : public GCAwareJITStubRoutine {
+public:
+ GCAwareJITStubRoutineWithExceptionHandler(const MacroAssemblerCodeRef&, VM&, CodeBlock*, CallSiteIndex);
+ ~GCAwareJITStubRoutineWithExceptionHandler() override;
+
+ void aboutToDie() override;
+
+private:
+ CodeBlock* m_codeBlockWithExceptionHandler;
+ CallSiteIndex m_exceptionHandlerCallSiteIndex;
+};
+
// Helper for easily creating a GC-aware JIT stub routine. For the varargs,
// pass zero or more JSCell*'s. This will either create a JITStubRoutine, a
// GCAwareJITStubRoutine, or an ObjectMarkingGCAwareJITStubRoutine as
PassRefPtr<JITStubRoutine> createJITStubRoutine(
const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls,
- JSCell* = nullptr);
+ JSCell* = nullptr,
+ CodeBlock* codeBlockForExceptionHandlers = nullptr, CallSiteIndex exceptionHandlingCallSiteIndex = CallSiteIndex(std::numeric_limits<unsigned>::max()));
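A minimal usage sketch of the extended helper when a stub makes calls and has registered its own exception handling call site; 'stubCode', 'vm', 'codeBlock', and 'exceptionHandlingCallSiteIndex' are hypothetical locals, and the parameter order follows the declaration above:

    // Sketch only: with a non-null CodeBlock for exception handlers, createJITStubRoutine
    // returns a GCAwareJITStubRoutineWithExceptionHandler; its destructor later releases the
    // CallSiteIndex and removes the exception handler it registered. The code block must come
    // from an optimizing JIT, and no marked object may be passed alongside it.
    RefPtr<JITStubRoutine> stub = createJITStubRoutine(
        stubCode, vm, codeBlock->ownerExecutable(), true /* makesCalls */,
        nullptr /* no cell to mark */,
        codeBlock, exceptionHandlingCallSiteIndex);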
// Helper for the creation of simple stub routines that need no help from the GC. Note
// that codeBlock gets "executed" more than once.
return m_ref.code();
}
+RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex)
+{
+ return RegisterSet();
+}
+
} // namespace JSC
namespace WTF {
#include "JITStubs.h"
#include "JSCJSValue.h"
#include "MacroAssemblerCodeRef.h"
+#include "RegisterSet.h"
namespace JSC {
virtual bool contains(void*) = 0;
+ virtual RegisterSet liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex);
+
private:
JITType m_jitType;
};
JITByIdGenerator::JITByIdGenerator(
CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType,
- const RegisterSet& usedRegisters, JSValueRegs base, JSValueRegs value,
- SpillRegistersMode spillMode)
+ const RegisterSet& usedRegisters, JSValueRegs base, JSValueRegs value)
: JITInlineCacheGenerator(codeBlock, codeOrigin, callSite, accessType)
, m_base(base)
, m_value(value)
{
- m_stubInfo->patch.spillMode = spillMode;
m_stubInfo->patch.usedRegisters = usedRegisters;
- // This is a convenience - in cases where the only registers you're using are base/value,
- // it allows you to pass RegisterSet() as the usedRegisters argument.
- m_stubInfo->patch.usedRegisters.set(base);
- m_stubInfo->patch.usedRegisters.set(value);
-
m_stubInfo->patch.baseGPR = static_cast<int8_t>(base.payloadGPR());
m_stubInfo->patch.valueGPR = static_cast<int8_t>(value.payloadGPR());
#if USE(JSVALUE32_64)
JITGetByIdGenerator::JITGetByIdGenerator(
CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
- JSValueRegs base, JSValueRegs value, SpillRegistersMode spillMode)
+ JSValueRegs base, JSValueRegs value)
: JITByIdGenerator(
- codeBlock, codeOrigin, callSite, AccessType::Get, usedRegisters, base, value, spillMode)
+ codeBlock, codeOrigin, callSite, AccessType::Get, usedRegisters, base, value)
{
RELEASE_ASSERT(base.payloadGPR() != value.tagGPR());
}
JITPutByIdGenerator::JITPutByIdGenerator(
CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
- JSValueRegs base, JSValueRegs value, GPRReg scratch, SpillRegistersMode spillMode,
+ JSValueRegs base, JSValueRegs value, GPRReg scratch,
ECMAMode ecmaMode, PutKind putKind)
: JITByIdGenerator(
- codeBlock, codeOrigin, callSite, AccessType::Put, usedRegisters, base, value, spillMode)
+ codeBlock, codeOrigin, callSite, AccessType::Put, usedRegisters, base, value)
, m_ecmaMode(ecmaMode)
, m_putKind(putKind)
{
JITByIdGenerator(
CodeBlock*, CodeOrigin, CallSiteIndex, AccessType, const RegisterSet&, JSValueRegs base,
- JSValueRegs value, SpillRegistersMode spillMode);
+ JSValueRegs value);
public:
void reportSlowPathCall(MacroAssembler::Label slowPathBegin, MacroAssembler::Call call)
JITGetByIdGenerator(
CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, JSValueRegs base,
- JSValueRegs value, SpillRegistersMode spillMode);
+ JSValueRegs value);
void generateFastPath(MacroAssembler&);
};
JITPutByIdGenerator(
CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, JSValueRegs base,
- JSValueRegs, GPRReg scratch, SpillRegistersMode spillMode, ECMAMode, PutKind);
+ JSValueRegs, GPRReg scratch, ECMAMode, PutKind);
void generateFastPath(MacroAssembler&);
JITGetByIdGenerator gen(
m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs(regT0), JSValueRegs(regT0), DontSpill);
+ JSValueRegs(regT0), JSValueRegs(regT0));
gen.generateFastPath(*this);
fastDoneCase = jump();
JITPutByIdGenerator gen(
m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs(regT0), JSValueRegs(regT1), regT2, DontSpill, m_codeBlock->ecmaMode(), putKind);
+ JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(), putKind);
gen.generateFastPath(*this);
doneCases.append(jump());
JITGetByIdGenerator gen(
m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs(regT0), JSValueRegs(regT0), DontSpill);
+ JSValueRegs(regT0), JSValueRegs(regT0));
gen.generateFastPath(*this);
addSlowCase(gen.slowPathJump());
m_getByIds.append(gen);
JITPutByIdGenerator gen(
m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs(regT0), JSValueRegs(regT1), regT2, DontSpill, m_codeBlock->ecmaMode(),
+ JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(),
direct ? Direct : NotDirect);
gen.generateFastPath(*this);
JITGetByIdGenerator gen(
m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), DontSpill);
+ JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0));
gen.generateFastPath(*this);
fastDoneCase = jump();
JITPutByIdGenerator gen(
m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), regT1, DontSpill, m_codeBlock->ecmaMode(), putKind);
+ JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), regT1, m_codeBlock->ecmaMode(), putKind);
gen.generateFastPath(*this);
doneCases.append(jump());
JITGetByIdGenerator gen(
m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), DontSpill);
+ JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0));
gen.generateFastPath(*this);
addSlowCase(gen.slowPathJump());
m_getByIds.append(gen);
JITPutByIdGenerator gen(
m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2),
- regT1, DontSpill, m_codeBlock->ecmaMode(), direct ? Direct : NotDirect);
+ regT1, m_codeBlock->ecmaMode(), direct ? Direct : NotDirect);
gen.generateFastPath(*this);
addSlowCase(gen.slowPathJump());
}
virtual ~JITStubRoutine();
+ virtual void aboutToDie() { }
// MacroAssemblerCodeRef is copyable, but at the cost of reference
// counting churn. Returning a reference is a good way of reducing
}
#endif
+RegisterSet RegisterSet::registersToNotSaveForCall()
+{
+ return RegisterSet(RegisterSet::vmCalleeSaveRegisters(), RegisterSet::stackRegisters(), RegisterSet::reservedHardwareRegisters());
+}
+
RegisterSet RegisterSet::allGPRs()
{
RegisterSet result;
static RegisterSet allGPRs();
static RegisterSet allFPRs();
static RegisterSet allRegisters();
+
+ static RegisterSet registersToNotSaveForCall();
void set(Reg reg, bool value = true)
{
m_vector.set(reg.index(), value);
}
- void set(JSValueRegs regs)
+ void set(JSValueRegs regs, bool value = true)
{
if (regs.tagGPR() != InvalidGPRReg)
- set(regs.tagGPR());
- set(regs.payloadGPR());
+ set(regs.tagGPR(), value);
+ set(regs.payloadGPR(), value);
}
void clear(Reg reg)
GPRReg ScratchRegisterAllocator::allocateScratchGPR() { return allocateScratch<GPRInfo>(); }
FPRReg ScratchRegisterAllocator::allocateScratchFPR() { return allocateScratch<FPRInfo>(); }
-size_t ScratchRegisterAllocator::preserveReusedRegistersByPushing(MacroAssembler& jit)
+unsigned ScratchRegisterAllocator::preserveReusedRegistersByPushing(MacroAssembler& jit)
{
if (!didReuseRegisters())
return 0;
- size_t numberOfBytesPushed = 0;
-
+ RegisterSet registersToSpill;
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
FPRReg reg = FPRInfo::toRegister(i);
- if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.get(reg)) {
- jit.pushToSave(reg);
- numberOfBytesPushed += sizeof(double);
- }
+ if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.get(reg))
+ registersToSpill.set(reg);
}
for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
GPRReg reg = GPRInfo::toRegister(i);
- if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.get(reg)) {
- jit.pushToSave(reg);
- numberOfBytesPushed += sizeof(uintptr_t);
- }
+ if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.get(reg))
+ registersToSpill.set(reg);
}
- size_t totalStackAdjustmentBytes = numberOfBytesPushed + maxFrameExtentForSlowPathCall;
- totalStackAdjustmentBytes = WTF::roundUpToMultipleOf(stackAlignmentBytes(), totalStackAdjustmentBytes);
-
- // FIXME: We shouldn't have to do this.
- // https://bugs.webkit.org/show_bug.cgi?id=149030
- size_t numberOfPaddingBytes = totalStackAdjustmentBytes - numberOfBytesPushed;
- jit.subPtr(MacroAssembler::TrustedImm32(numberOfPaddingBytes), MacroAssembler::stackPointerRegister);
+ unsigned extraStackBytesAtTopOfStack = maxFrameExtentForSlowPathCall;
+ unsigned stackAdjustmentSize = ScratchRegisterAllocator::preserveRegistersToStackForCall(jit, registersToSpill, extraStackBytesAtTopOfStack);
- return numberOfPaddingBytes;
+ return stackAdjustmentSize;
}
-void ScratchRegisterAllocator::restoreReusedRegistersByPopping(MacroAssembler& jit, size_t numberOfPaddingBytes)
+void ScratchRegisterAllocator::restoreReusedRegistersByPopping(MacroAssembler& jit, unsigned numberOfBytesUsedToPreserveReusedRegisters)
{
if (!didReuseRegisters())
return;
- jit.addPtr(MacroAssembler::TrustedImm32(numberOfPaddingBytes), MacroAssembler::stackPointerRegister);
-
+ RegisterSet registersToFill;
for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
GPRReg reg = GPRInfo::toRegister(i);
if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.get(reg))
- jit.popToRestore(reg);
+ registersToFill.set(reg);
}
for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
FPRReg reg = FPRInfo::toRegister(i);
if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.get(reg))
- jit.popToRestore(reg);
+ registersToFill.set(reg);
}
+
+ unsigned extraStackBytesAtTopOfStack = maxFrameExtentForSlowPathCall;
+ RegisterSet dontRestore; // Empty set. We want to restore everything.
+ ScratchRegisterAllocator::restoreRegistersFromStackForCall(jit, registersToFill, dontRestore, numberOfBytesUsedToPreserveReusedRegisters, extraStackBytesAtTopOfStack);
}
RegisterSet ScratchRegisterAllocator::usedRegistersForCall() const
{
RegisterSet result = m_usedRegisters;
- result.exclude(RegisterSet::calleeSaveRegisters());
- result.exclude(RegisterSet::stackRegisters());
- result.exclude(RegisterSet::reservedHardwareRegisters());
+ result.exclude(RegisterSet::registersToNotSaveForCall());
return result;
}
unsigned count = 0;
for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
- if (usedRegisters.get(reg))
- jit.storePtr(reg, static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++));
+ if (usedRegisters.get(reg)) {
+ jit.storePtr(reg, static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + count);
+ count++;
+ }
if (GPRInfo::toIndex(reg) != GPRInfo::InvalidIndex
&& scratchGPR == InvalidGPRReg
&& !m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg))
RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
if (usedRegisters.get(reg)) {
- jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
+ jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + count), scratchGPR);
+ count++;
jit.storeDouble(reg, scratchGPR);
}
}
}
}
+unsigned ScratchRegisterAllocator::preserveRegistersToStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, unsigned extraBytesAtTopOfStack)
+{
+ RELEASE_ASSERT(extraBytesAtTopOfStack % sizeof(void*) == 0);
+ if (!usedRegisters.numberOfSetRegisters())
+ return 0;
+
+ unsigned stackOffset = (usedRegisters.numberOfSetRegisters()) * sizeof(EncodedJSValue);
+ stackOffset += extraBytesAtTopOfStack;
+ stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), stackOffset);
+ jit.subPtr(
+ MacroAssembler::TrustedImm32(stackOffset),
+ MacroAssembler::stackPointerRegister);
+
+ unsigned count = 0;
+ for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ jit.storePtr(reg, MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (count * sizeof(EncodedJSValue))));
+ count++;
+ }
+ }
+ for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ jit.storeDouble(reg, MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (count * sizeof(EncodedJSValue))));
+ count++;
+ }
+ }
+
+ RELEASE_ASSERT(count == usedRegisters.numberOfSetRegisters());
+
+ return stackOffset;
+}
+
+void ScratchRegisterAllocator::restoreRegistersFromStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, const RegisterSet& ignore, unsigned numberOfStackBytesUsedForRegisterPreservation, unsigned extraBytesAtTopOfStack)
+{
+ RELEASE_ASSERT(extraBytesAtTopOfStack % sizeof(void*) == 0);
+ if (!usedRegisters.numberOfSetRegisters()) {
+ RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == 0);
+ return;
+ }
+
+ unsigned count = 0;
+ for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ if (!ignore.get(reg))
+ jit.loadPtr(MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (sizeof(EncodedJSValue) * count)), reg);
+ count++;
+ }
+ }
+ for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ if (!ignore.get(reg))
+ jit.loadDouble(MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (sizeof(EncodedJSValue) * count)), reg);
+ count++;
+ }
+ }
+
+ unsigned stackOffset = (usedRegisters.numberOfSetRegisters()) * sizeof(EncodedJSValue);
+ stackOffset += extraBytesAtTopOfStack;
+ stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), stackOffset);
+
+ RELEASE_ASSERT(count == usedRegisters.numberOfSetRegisters());
+ RELEASE_ASSERT(stackOffset == numberOfStackBytesUsedForRegisterPreservation);
+
+ jit.addPtr(
+ MacroAssembler::TrustedImm32(stackOffset),
+ MacroAssembler::stackPointerRegister);
+}
+
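A minimal sketch of how the two static helpers above are meant to pair up around a call, assuming hypothetical 'jit', 'liveRegisters', and 'resultRegs'; the extra bytes at the top of the stack mirror the maxFrameExtentForSlowPathCall usage above:

    // Sketch only. Registers that never need saving across a call are excluded first.
    RegisterSet registersToSpill = liveRegisters;
    registersToSpill.exclude(RegisterSet::registersToNotSaveForCall());
    unsigned bytesPreserved = ScratchRegisterAllocator::preserveRegistersToStackForCall(
        jit, registersToSpill, maxFrameExtentForSlowPathCall);

    // ... emit the getter/setter call here ...

    RegisterSet ignore;
    ignore.set(resultRegs); // Keep the call's result; fill everything else from the stack.
    ScratchRegisterAllocator::restoreRegistersFromStackForCall(
        jit, registersToSpill, ignore, bytesPreserved, maxFrameExtentForSlowPathCall);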
} // namespace JSC
#endif // ENABLE(JIT)
{
return m_numberOfReusedRegisters;
}
+
+ RegisterSet usedRegisters() const { return m_usedRegisters; }
- // preserveReusedRegistersByPushing() returns the number of padding bytes used to keep the stack
- // pointer properly aligned and to reserve room for calling a C helper. This number of padding
- // bytes must be provided to restoreReusedRegistersByPopping() in order to reverse the work done
- // by preserveReusedRegistersByPushing().
+ // preserveReusedRegistersByPushing() returns the number of bytes used on the stack to preserve the
+ // reused registers, including any padding needed to keep the stack pointer properly aligned and to
+ // reserve room for calling a C helper. This byte count must be provided to
+ // restoreReusedRegistersByPopping() in order to reverse the work done by preserveReusedRegistersByPushing().
- size_t preserveReusedRegistersByPushing(MacroAssembler& jit);
- void restoreReusedRegistersByPopping(MacroAssembler& jit, size_t numberOfPaddingBytes);
+ unsigned preserveReusedRegistersByPushing(MacroAssembler& jit);
+ void restoreReusedRegistersByPopping(MacroAssembler& jit, unsigned numberOfBytesUsedToPreserveReusedRegisters);
RegisterSet usedRegistersForCall() const;
unsigned desiredScratchBufferSizeForCall() const;
void preserveUsedRegistersToScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg);
-
void restoreUsedRegistersFromScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg);
-
+
+ static unsigned preserveRegistersToStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, unsigned extraPaddingInBytes);
+ static void restoreRegistersFromStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, const RegisterSet& ignore, unsigned numberOfStackBytesUsedForRegisterPreservation, unsigned extraPaddingInBytes);
+
private:
RegisterSet m_usedRegisters;
TempRegisterSet m_lockedRegisters;
WriteBarrier<JSObject> m_delegate;
};
+class CustomGetter : public JSNonFinalObject {
+public:
+ CustomGetter(VM& vm, Structure* structure)
+ : Base(vm, structure)
+ {
+ }
+
+ DECLARE_INFO;
+ typedef JSNonFinalObject Base;
+ static const unsigned StructureFlags = Base::StructureFlags | JSC::OverridesGetOwnPropertySlot;
+
+ static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
+ {
+ return Structure::create(vm, globalObject, prototype, TypeInfo(ObjectType, StructureFlags), info());
+ }
+
+ static CustomGetter* create(VM& vm, Structure* structure)
+ {
+ CustomGetter* getter = new (NotNull, allocateCell<CustomGetter>(vm.heap, sizeof(CustomGetter))) CustomGetter(vm, structure);
+ getter->finishCreation(vm);
+ return getter;
+ }
+
+ static bool getOwnPropertySlot(JSObject* object, ExecState* exec, PropertyName propertyName, PropertySlot& slot)
+ {
+ CustomGetter* thisObject = jsCast<CustomGetter*>(object);
+ if (propertyName == PropertyName(Identifier::fromString(exec, "customGetter"))) {
+ slot.setCacheableCustom(thisObject, DontDelete | ReadOnly | DontEnum, thisObject->customGetter);
+ return true;
+ }
+ return JSObject::getOwnPropertySlot(thisObject, exec, propertyName, slot);
+ }
+
+private:
+ static EncodedJSValue customGetter(ExecState* exec, JSObject*, EncodedJSValue thisValue, PropertyName)
+ {
+ CustomGetter* thisObject = jsDynamicCast<CustomGetter*>(JSValue::decode(thisValue));
+ if (!thisObject)
+ return throwVMTypeError(exec);
+ bool shouldThrow = thisObject->get(exec, PropertyName(Identifier::fromString(exec, "shouldThrow"))).toBoolean(exec);
+ if (shouldThrow)
+ return throwVMTypeError(exec);
+ return JSValue::encode(jsNumber(100));
+ }
+};
+
class RuntimeArray : public JSArray {
public:
typedef JSArray Base;
const ClassInfo Masquerader::s_info = { "Masquerader", &Base::s_info, 0, CREATE_METHOD_TABLE(Masquerader) };
const ClassInfo Root::s_info = { "Root", &Base::s_info, 0, CREATE_METHOD_TABLE(Root) };
const ClassInfo ImpureGetter::s_info = { "ImpureGetter", &Base::s_info, 0, CREATE_METHOD_TABLE(ImpureGetter) };
+const ClassInfo CustomGetter::s_info = { "CustomGetter", &Base::s_info, 0, CREATE_METHOD_TABLE(CustomGetter) };
const ClassInfo RuntimeArray::s_info = { "RuntimeArray", &Base::s_info, 0, CREATE_METHOD_TABLE(RuntimeArray) };
ElementHandleOwner* Element::handleOwner()
static EncodedJSValue JSC_HOST_CALL functionCreateProxy(ExecState*);
static EncodedJSValue JSC_HOST_CALL functionCreateRuntimeArray(ExecState*);
static EncodedJSValue JSC_HOST_CALL functionCreateImpureGetter(ExecState*);
+static EncodedJSValue JSC_HOST_CALL functionCreateCustomGetterObject(ExecState*);
static EncodedJSValue JSC_HOST_CALL functionSetImpureGetterDelegate(ExecState*);
static EncodedJSValue JSC_HOST_CALL functionSetElementRoot(ExecState*);
addFunction(vm, "createRuntimeArray", functionCreateRuntimeArray, 0);
addFunction(vm, "createImpureGetter", functionCreateImpureGetter, 1);
+ addFunction(vm, "createCustomGetterObject", functionCreateCustomGetterObject, 0);
addFunction(vm, "setImpureGetterDelegate", functionSetImpureGetterDelegate, 2);
addFunction(vm, "dumpTypesForAllVariables", functionDumpTypesForAllVariables , 0);
return JSValue::encode(result);
}
+EncodedJSValue JSC_HOST_CALL functionCreateCustomGetterObject(ExecState* exec)
+{
+ JSLockHolder lock(exec);
+ Structure* structure = CustomGetter::createStructure(exec->vm(), exec->lexicalGlobalObject(), jsNull());
+ CustomGetter* result = CustomGetter::create(exec->vm(), structure);
+ return JSValue::encode(result);
+}
+
EncodedJSValue JSC_HOST_CALL functionSetImpureGetterDelegate(ExecState* exec)
{
JSLockHolder lock(exec);
--- /dev/null
+function assert(b) {
+ if (!b) throw new Error("b");
+}
+noInline(assert);
+
+let i;
+var o1 = createCustomGetterObject();
+o1.shouldThrow = false;
+
+var o2 = {
+ customGetter: 40
+}
+
+var o3 = {
+ x: 100,
+ customGetter: 50
+}
+
+i = -1000;
+bar(i);
+foo(i);
+function bar(i) {
+ if (i === -1000)
+ return o1;
+
+ if (i % 2)
+ return o3;
+ else
+ return o2;
+}
+noInline(bar);
+
+function foo(i) {
+ var o = bar(i);
+ var v;
+ try {
+ v = o.customGetter;
+ } catch(e) {
+ assert(o === o1);
+ }
+}
+noInline(foo);
+
+foo(i);
+for (i = 0; i < 1000; i++)
+ foo(i);
+
+i = -1000;
+for (let j = 0; j < 1000; j++) {
+ if (j > 10)
+ o1.shouldThrow = true;
+ foo(i);
+}
--- /dev/null
+function assert(b) {
+ if (!b) throw new Error("bad value");
+}
+noInline(assert);
+
+let i;
+var o1 = {
+ get f() {
+ if (i === -1000)
+ throw new Error("hello");
+ return 20;
+ },
+ x: "x"
+};
+
+var o2 = {
+ f: 40
+}
+
+var o3 = {
+ x: 100,
+ f: "f"
+}
+
+function bar(i) {
+ if (i === -1000)
+ return o1;
+
+ if (i % 2)
+ return o3;
+ else
+ return o2;
+}
+noInline(bar);
+
+function foo(i) {
+ var o = bar(i);
+ let v;
+ let v2;
+ let v3;
+ try {
+ v2 = o.x;
+ v = o.f;
+ } catch(e) {
+ assert(v2 === "x");
+ assert(o === o1);
+ }
+}
+noInline(foo);
+
+foo(i);
+for (i = 0; i < 1000; i++)
+ foo(i);
+
+i = -1000;
+for (let j = 0; j < 1000; j++)
+ foo(i);
--- /dev/null
+function assert(b) {
+ if (!b) throw new Error("b");
+}
+noInline(assert);
+
+
+let i;
+var o1 = {
+ get f() {
+ if (i === -1000)
+ throw new Error("hello");
+ return 20;
+ }
+};
+
+var o2 = {
+ f: 40
+}
+
+var o3 = {
+ x: 100,
+ f: 50
+}
+
+function bar(i) {
+ if (i === -1000)
+ return o1;
+
+ if (i % 2)
+ return o3;
+ else
+ return o2;
+}
+noInline(bar);
+
+function foo(i) {
+ var o = bar(i);
+ var v;
+ try {
+ v = o.f
+ } catch(e) {
+ assert(o === o1);
+ }
+}
+noInline(foo);
+
+foo(i);
+for (i = 0; i < 1000; i++)
+ foo(i);
+
+i = -1000;
+for (let j = 0; j < 1000; j++)
+ foo(i);
--- /dev/null
+function assert(b) {
+ if (!b)
+ throw new Error("bad assertion");
+}
+noInline(assert);
+
+
+let i;
+var o1 = {
+ set f(v) {
+ if (i === -1000)
+ throw new Error("hello");
+ this._v = v;
+ }
+};
+
+var o2 = {
+ f: 40
+}
+
+var o3 = {
+ x: 100,
+ f: 50
+}
+
+function bar(i) {
+ if (i === -1000)
+ return o1;
+
+ if (i % 2)
+ return o3;
+ else
+ return o2;
+}
+noInline(bar);
+
+function foo(i) {
+ let o = bar(i);
+ let v = o.x;
+ try {
+ o.f = v;
+ } catch(e) {
+ assert(o === o1);
+ }
+}
+noInline(foo);
+
+foo(i);
+for (i = 0; i < 1000; i++)
+ foo(i);
+
+i = -1000;
+for (let j = 0; j < 1000; j++)
+ foo(i);
--- /dev/null
+// The main purpose of this test is to ensure that
+// we will reuse CallSiteIndices that are no longer in use
+// for inline cache stubs. See the relevant code in the
+// GCAwareJITStubRoutineWithExceptionHandler destructor, which calls:
+// DFG::CommonData::removeCallSiteIndex(.)
+// CodeBlock::removeExceptionHandlerForCallSite(.)
+// These add old call site indices to a free list.
+
+function assert(b) {
+ if (!b)
+ throw new Error("bad value");
+}
+noInline(assert);
+
+var arr = []
+function allocate() {
+ for (var i = 0; i < 10000; i++)
+ arr.push({});
+}
+
+function hello() { return 20; }
+noInline(hello);
+
+function foo(o) {
+ let baz = hello();
+ let v;
+ try {
+ v = o.f;
+ v = o.f;
+ v = o.f;
+ } catch(e) {
+ assert(baz === 20);
+ assert(v === 2); // Really flagCount.
+ }
+ return v;
+}
+noInline(foo);
+
+var objChain = {f: 40};
+var fakeOut = {x: 30, f: 100};
+for (let i = 0; i < 1000; i++)
+ foo(i % 2 ? objChain : fakeOut);
+
+var i;
+var flag = "flag";
+var flagCount = 0;
+objChain = {
+ get f() {
+ if (flagCount === 2)
+ throw new Error("I'm testing you.");
+ if (i === flag)
+ flagCount++;
+ return flagCount;
+ }
+};
+for (i = 0; i < 100; i++) {
+ allocate();
+ if (i === 99)
+ i = flag;
+ foo(objChain);
+}
+
+fakeOut = {x: 30, get f() { return 100}};
+for (i = 0; i < 100; i++) {
+ allocate();
+ if (i === 99)
+ i = flag;
+ foo(fakeOut);
+}
+
+var o = {
+ get f() {
+ return flagCount;
+ },
+ x: 100
+};
+
+for (i = 0; i < 100; i++)
+ foo(o);