jit/JITCode.cpp
jit/JITDisassembler.cpp
jit/JITExceptions.cpp
+ jit/JITInlineCacheGenerator.cpp
jit/JITOpcodes.cpp
jit/JITOpcodes32_64.cpp
jit/JITOperations.cpp
+2013-10-19 Filip Pizlo <fpizlo@apple.com>
+
+ Baseline JIT and DFG IC code generation should be unified and rationalized
+ https://bugs.webkit.org/show_bug.cgi?id=122939
+
+ Reviewed by Geoffrey Garen.
+
+ Introduce the JITInlineCacheGenerator, which takes a CodeBlock and a CodeOrigin plus
+ some register info and creates JIT inline caches for you. Used this to further
+ unify the baseline and DFG ICs. In the future we can use this for FTL ICs. And my hope
+ is that we'll be able to use it for cascading ICs: an IC for some instruction may realize
+ that it needs to do the equivalent of get_by_id, so with this generator it will be able
+ to create an IC even though it wasn't associated with a get_by_id bytecode instruction.
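+
+ A minimal sketch of a client's lifecycle, mirroring the baseline get_by_id
+ call site updated below (all names come from this patch):
+
+ JITGetByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet(),
+ JSValueRegs(regT0), JSValueRegs(regT0), true);
+ gen.generateFastPath(*this); // structure check plus patchable compact load
+ addSlowCase(gen.slowPathJump()); // taken when the structure check fails
+
+ // In the slow case, after emitting the IC miss call:
+ gen.reportSlowPathCall(coldPathBegin, call);
+
+ // At link time:
+ gen.finalize(patchBuffer); // records the patch offsets in the StructureStubInfo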
+
+ * CMakeLists.txt:
+ * GNUmakefile.list.am:
+ * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * assembler/AbstractMacroAssembler.h:
+ (JSC::AbstractMacroAssembler::DataLabelCompact::label):
+ * bytecode/CodeBlock.h:
+ (JSC::CodeBlock::ecmaMode):
+ * dfg/DFGInlineCacheWrapper.h: Added.
+ (JSC::DFG::InlineCacheWrapper::InlineCacheWrapper):
+ * dfg/DFGInlineCacheWrapperInlines.h: Added.
+ (JSC::DFG::InlineCacheWrapper::finalize):
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::link):
+ * dfg/DFGJITCompiler.h:
+ (JSC::DFG::JITCompiler::addGetById):
+ (JSC::DFG::JITCompiler::addPutById):
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::cachedGetById):
+ (JSC::DFG::SpeculativeJIT::cachedPutById):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::cachedGetById):
+ (JSC::DFG::SpeculativeJIT::cachedPutById):
+ (JSC::DFG::SpeculativeJIT::compile):
+ * jit/AssemblyHelpers.h:
+ (JSC::AssemblyHelpers::isStrictModeFor):
+ (JSC::AssemblyHelpers::strictModeFor):
+ * jit/GPRInfo.h:
+ (JSC::JSValueRegs::tagGPR):
+ * jit/JIT.cpp:
+ (JSC::JIT::JIT):
+ (JSC::JIT::privateCompileSlowCases):
+ (JSC::JIT::privateCompile):
+ * jit/JIT.h:
+ * jit/JITInlineCacheGenerator.cpp: Added.
+ (JSC::JITInlineCacheGenerator::JITInlineCacheGenerator):
+ (JSC::JITByIdGenerator::JITByIdGenerator):
+ (JSC::JITByIdGenerator::finalize):
+ (JSC::JITByIdGenerator::generateFastPathChecks):
+ (JSC::JITGetByIdGenerator::generateFastPath):
+ (JSC::JITPutByIdGenerator::JITPutByIdGenerator):
+ (JSC::JITPutByIdGenerator::generateFastPath):
+ (JSC::JITPutByIdGenerator::slowPathFunction):
+ * jit/JITInlineCacheGenerator.h: Added.
+ (JSC::JITInlineCacheGenerator::JITInlineCacheGenerator):
+ (JSC::JITInlineCacheGenerator::stubInfo):
+ (JSC::JITByIdGenerator::JITByIdGenerator):
+ (JSC::JITByIdGenerator::reportSlowPathCall):
+ (JSC::JITByIdGenerator::slowPathJump):
+ (JSC::JITGetByIdGenerator::JITGetByIdGenerator):
+ (JSC::JITPutByIdGenerator::JITPutByIdGenerator):
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::emit_op_get_by_id):
+ (JSC::JIT::emitSlow_op_get_by_id):
+ (JSC::JIT::emit_op_put_by_id):
+ (JSC::JIT::emitSlow_op_put_by_id):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::emit_op_get_by_id):
+ (JSC::JIT::emitSlow_op_get_by_id):
+ (JSC::JIT::emit_op_put_by_id):
+ (JSC::JIT::emitSlow_op_put_by_id):
+ * jit/RegisterSet.h:
+ (JSC::RegisterSet::set):
+
2013-10-19 Alexey Proskuryakov <ap@apple.com>
APICast.h uses functions from JSCJSValueInlines.h, but doesn't include it
Source/JavaScriptCore/dfg/DFGGraph.h \
Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.cpp \
Source/JavaScriptCore/dfg/DFGInPlaceAbstractState.h \
+ Source/JavaScriptCore/dfg/DFGInlineCacheWrapper.h \
+ Source/JavaScriptCore/dfg/DFGInlineCacheWrapperInlines.h \
Source/JavaScriptCore/dfg/DFGInsertionSet.h \
Source/JavaScriptCore/dfg/DFGJITCode.cpp \
Source/JavaScriptCore/dfg/DFGJITCode.h \
Source/JavaScriptCore/jit/JIT.h \
Source/JavaScriptCore/jit/JITExceptions.cpp \
Source/JavaScriptCore/jit/JITExceptions.h \
+ Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp \
+ Source/JavaScriptCore/jit/JITInlineCacheGenerator.h \
Source/JavaScriptCore/jit/JITInlines.h \
Source/JavaScriptCore/jit/JITOpcodes32_64.cpp \
Source/JavaScriptCore/jit/JITOpcodes.cpp \
<ClCompile Include="..\jit\JITCode.cpp" />\r
<ClCompile Include="..\jit\JITDisassembler.cpp" />\r
<ClCompile Include="..\jit\JITExceptions.cpp" />\r
+ <ClCompile Include="..\jit\JITInlineCacheGenerator.cpp" />\r
<ClCompile Include="..\jit\JITOpcodes.cpp" />\r
<ClCompile Include="..\jit\JITOpcodes32_64.cpp" />\r
<ClCompile Include="..\jit\JITOperations.cpp" />\r
<ClInclude Include="..\jit\JITCompilationEffort.h" />\r
<ClInclude Include="..\jit\JITDisassembler.h" />\r
<ClInclude Include="..\jit\JITExceptions.h" />\r
+ <ClInclude Include="..\jit\JITInlineCacheGenerator.h" />\r
<ClInclude Include="..\jit\JITInlines.h" />\r
<ClInclude Include="..\jit\JITOperationWrappers.h" />\r
<ClInclude Include="..\jit\JITOperations.h" />\r
0FB1058C1675483300F8AB6E /* ProfilerOSRExit.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FB105881675482E00F8AB6E /* ProfilerOSRExit.h */; settings = {ATTRIBUTES = (Private, ); }; };
0FB1058D1675483700F8AB6E /* ProfilerOSRExitSite.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FB105891675482E00F8AB6E /* ProfilerOSRExitSite.cpp */; };
0FB1058E1675483A00F8AB6E /* ProfilerOSRExitSite.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FB1058A1675482E00F8AB6E /* ProfilerOSRExitSite.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0FB14E1E18124ACE009B6B4D /* JITInlineCacheGenerator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FB14E1C18124ACE009B6B4D /* JITInlineCacheGenerator.cpp */; };
+ 0FB14E1F18124ACE009B6B4D /* JITInlineCacheGenerator.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FB14E1D18124ACE009B6B4D /* JITInlineCacheGenerator.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0FB14E211812570B009B6B4D /* DFGInlineCacheWrapper.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FB14E201812570B009B6B4D /* DFGInlineCacheWrapper.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0FB14E2318130955009B6B4D /* DFGInlineCacheWrapperInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FB14E2218130955009B6B4D /* DFGInlineCacheWrapperInlines.h */; settings = {ATTRIBUTES = (Private, ); }; };
0FB5467714F59B5C002C2989 /* LazyOperandValueProfile.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FB5467614F59AD1002C2989 /* LazyOperandValueProfile.h */; settings = {ATTRIBUTES = (Private, ); }; };
0FB5467914F5C46B002C2989 /* LazyOperandValueProfile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FB5467814F5C468002C2989 /* LazyOperandValueProfile.cpp */; };
0FB5467B14F5C7E1002C2989 /* MethodOfGettingAValueProfile.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FB5467A14F5C7D4002C2989 /* MethodOfGettingAValueProfile.h */; settings = {ATTRIBUTES = (Private, ); }; };
0FB105881675482E00F8AB6E /* ProfilerOSRExit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ProfilerOSRExit.h; path = profiler/ProfilerOSRExit.h; sourceTree = "<group>"; };
0FB105891675482E00F8AB6E /* ProfilerOSRExitSite.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = ProfilerOSRExitSite.cpp; path = profiler/ProfilerOSRExitSite.cpp; sourceTree = "<group>"; };
0FB1058A1675482E00F8AB6E /* ProfilerOSRExitSite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ProfilerOSRExitSite.h; path = profiler/ProfilerOSRExitSite.h; sourceTree = "<group>"; };
+ 0FB14E1C18124ACE009B6B4D /* JITInlineCacheGenerator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITInlineCacheGenerator.cpp; sourceTree = "<group>"; };
+ 0FB14E1D18124ACE009B6B4D /* JITInlineCacheGenerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITInlineCacheGenerator.h; sourceTree = "<group>"; };
+ 0FB14E201812570B009B6B4D /* DFGInlineCacheWrapper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGInlineCacheWrapper.h; path = dfg/DFGInlineCacheWrapper.h; sourceTree = "<group>"; };
+ 0FB14E2218130955009B6B4D /* DFGInlineCacheWrapperInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGInlineCacheWrapperInlines.h; path = dfg/DFGInlineCacheWrapperInlines.h; sourceTree = "<group>"; };
0FB4B51016B3A964003F696B /* DFGMinifiedID.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGMinifiedID.h; path = dfg/DFGMinifiedID.h; sourceTree = "<group>"; };
0FB4B51916B62772003F696B /* DFGAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGAllocator.h; path = dfg/DFGAllocator.h; sourceTree = "<group>"; };
0FB4B51A16B62772003F696B /* DFGCommon.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGCommon.cpp; path = dfg/DFGCommon.cpp; sourceTree = "<group>"; };
0FAF7EFB165BA919000C8455 /* JITDisassembler.h */,
0F46807F14BA572700BFE272 /* JITExceptions.cpp */,
0F46808014BA572700BFE272 /* JITExceptions.h */,
+ 0FB14E1C18124ACE009B6B4D /* JITInlineCacheGenerator.cpp */,
+ 0FB14E1D18124ACE009B6B4D /* JITInlineCacheGenerator.h */,
86CC85A00EE79A4700288682 /* JITInlines.h */,
BCDD51E90FB8DF74004A8BDC /* JITOpcodes.cpp */,
A71236E41195F33C00BD2174 /* JITOpcodes32_64.cpp */,
86EC9DB31328DF44002B2AD7 /* dfg */ = {
isa = PBXGroup;
children = (
- 0F9D339417FFC4E60073C2BC /* DFGFlushedAt.cpp */,
- 0F9D339517FFC4E60073C2BC /* DFGFlushedAt.h */,
A77A423617A0BBFD00A8DB81 /* DFGAbstractHeap.cpp */,
A77A423717A0BBFD00A8DB81 /* DFGAbstractHeap.h */,
A704D8FE17A0BAA8006BA554 /* DFGAbstractInterpreter.h */,
A78A976F179738B8009DF744 /* DFGFinalizer.h */,
0F2BDC12151C5D4A00CD8910 /* DFGFixupPhase.cpp */,
0F2BDC13151C5D4A00CD8910 /* DFGFixupPhase.h */,
+ 0F9D339417FFC4E60073C2BC /* DFGFlushedAt.cpp */,
+ 0F9D339517FFC4E60073C2BC /* DFGFlushedAt.h */,
A7D89CE817A0B8CC00773AD8 /* DFGFlushFormat.cpp */,
A7D89CE917A0B8CC00773AD8 /* DFGFlushFormat.h */,
A7D89CEA17A0B8CC00773AD8 /* DFGFlushLivenessAnalysisPhase.cpp */,
86EC9DB61328DF82002B2AD7 /* DFGGenerationInfo.h */,
86EC9DB71328DF82002B2AD7 /* DFGGraph.cpp */,
86EC9DB81328DF82002B2AD7 /* DFGGraph.h */,
+ 0FB14E201812570B009B6B4D /* DFGInlineCacheWrapper.h */,
+ 0FB14E2218130955009B6B4D /* DFGInlineCacheWrapperInlines.h */,
A704D90017A0BAA8006BA554 /* DFGInPlaceAbstractState.cpp */,
A704D90117A0BAA8006BA554 /* DFGInPlaceAbstractState.h */,
0F2BDC1F151E803800CD8910 /* DFGInsertionSet.h */,
BC87CDB910712AD4000614CF /* JSONObject.lut.h in Headers */,
9534AAFB0E5B7A9600B8A45B /* JSProfilerPrivate.h in Headers */,
7C184E1B17BEDBD3007CB63A /* JSPromise.h in Headers */,
+ 0FB14E1F18124ACE009B6B4D /* JITInlineCacheGenerator.h in Headers */,
7C15F65E17C199CE00794D40 /* JSPromiseCallback.h in Headers */,
7C184E2317BEE240007CB63A /* JSPromiseConstructor.h in Headers */,
7C184E1F17BEE22E007CB63A /* JSPromisePrototype.h in Headers */,
0F2B670217B6B5AB00A7AE3F /* JSUint16Array.h in Headers */,
0F2B670317B6B5AB00A7AE3F /* JSUint32Array.h in Headers */,
0F2B670017B6B5AB00A7AE3F /* JSUint8Array.h in Headers */,
+ 0FB14E2318130955009B6B4D /* DFGInlineCacheWrapperInlines.h in Headers */,
0F2B670117B6B5AB00A7AE3F /* JSUint8ClampedArray.h in Headers */,
86E3C612167BABD7006D760A /* JSValue.h in Headers */,
+ 0FB14E211812570B009B6B4D /* DFGInlineCacheWrapper.h in Headers */,
86E3C61B167BABEE006D760A /* JSValueInternal.h in Headers */,
BC18C42C0E16F5CD00B34460 /* JSValueRef.h in Headers */,
BC18C42D0E16F5CD00B34460 /* JSVariableObject.h in Headers */,
0F235BD317178E1C00690C7F /* FTLExitArgument.cpp in Sources */,
0F235BD517178E1C00690C7F /* FTLExitArgumentForOperand.cpp in Sources */,
0F235BD817178E1C00690C7F /* FTLExitThunkGenerator.cpp in Sources */,
+ 0FB14E1E18124ACE009B6B4D /* JITInlineCacheGenerator.cpp in Sources */,
0F235BDA17178E1C00690C7F /* FTLExitValue.cpp in Sources */,
A7F2996B17A0BB670010417A /* FTLFail.cpp in Sources */,
0FEA0A281709623B00BB722C /* FTLIntrinsicRepository.cpp in Sources */,
{
}
+ AssemblerLabel label() const { return m_label; }
+
private:
AssemblerLabel m_label;
};
void printStructure(PrintStream&, const char* name, const Instruction*, int operand);
bool isStrictMode() const { return m_isStrictMode; }
+ ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }
inline bool isKnownNotImmediate(int index)
{
--- /dev/null
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGInlineCacheWrapper_h
+#define DFGInlineCacheWrapper_h
+
+#if ENABLE(DFG_JIT)
+
+#include "JITInlineCacheGenerator.h"
+
+namespace JSC { namespace DFG {
+
+class SlowPathGenerator;
+
+template<typename GeneratorType>
+struct InlineCacheWrapper {
+ InlineCacheWrapper() { }
+
+ InlineCacheWrapper(const GeneratorType& generator, SlowPathGenerator* slowPath)
+ : m_generator(generator)
+ , m_slowPath(slowPath)
+ {
+ }
+
+ void finalize(LinkBuffer&);
+
+ GeneratorType m_generator;
+ SlowPathGenerator* m_slowPath;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGInlineCacheWrapper_h
+
+
--- /dev/null
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGInlineCacheWrapperInlines_h
+#define DFGInlineCacheWrapperInlines_h
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGInlineCacheWrapper.h"
+#include "DFGSlowPathGenerator.h"
+
+namespace JSC { namespace DFG {
+
+template<typename GeneratorType>
+void InlineCacheWrapper<GeneratorType>::finalize(LinkBuffer& linkBuffer)
+{
+ m_generator.reportSlowPathCall(m_slowPath->label(), m_slowPath->call());
+ m_generator.finalize(linkBuffer);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGInlineCacheWrapperInlines_h
+
#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
+#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExitCompiler.h"
for (unsigned i = 0; i < m_calls.size(); ++i)
linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);
- for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
- StructureStubInfo& info = *m_propertyAccesses[i].m_stubInfo;
- CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->call());
- info.callReturnLocation = callReturnLocation;
- info.patch.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_structureImm), callReturnLocation);
- info.patch.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_structureCheck));
-#if USE(JSVALUE64)
- info.patch.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_loadOrStore));
-#else
- info.patch.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_tagLoadOrStore));
- info.patch.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_payloadLoadOrStore));
-#endif
- info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->label()));
- info.patch.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_done));
- info.patch.deltaCallToStorageLoad = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_propertyStorageLoad));
- }
+ for (unsigned i = m_getByIds.size(); i--;)
+ m_getByIds[i].finalize(linkBuffer);
+ for (unsigned i = m_putByIds.size(); i--;)
+ m_putByIds[i].finalize(linkBuffer);
+
for (unsigned i = 0; i < m_ins.size(); ++i) {
StructureStubInfo& info = *m_ins[i].m_stubInfo;
CodeLocationLabel jump = linkBuffer.locationOf(m_ins[i].m_jump);
#include "CodeBlock.h"
#include "DFGDisassembler.h"
#include "DFGGraph.h"
+#include "DFGInlineCacheWrapper.h"
#include "DFGJITCode.h"
#include "DFGOSRExitCompilationInfo.h"
#include "DFGRegisterBank.h"
#include "FPRInfo.h"
#include "GPRInfo.h"
#include "JITCode.h"
+#include "JITInlineCacheGenerator.h"
#include "LinkBuffer.h"
#include "MacroAssembler.h"
#include "RegisterSet.h"
FunctionPtr m_function;
};
-struct PropertyAccessRecord {
- PropertyAccessRecord(
- MacroAssembler::DataLabelPtr structureImm,
- MacroAssembler::PatchableJump structureCheck,
- MacroAssembler::ConvertibleLoadLabel propertyStorageLoad,
-#if USE(JSVALUE64)
- MacroAssembler::DataLabelCompact loadOrStore,
-#elif USE(JSVALUE32_64)
- MacroAssembler::DataLabelCompact tagLoadOrStore,
- MacroAssembler::DataLabelCompact payloadLoadOrStore,
-#endif
- SlowPathGenerator* slowPathGenerator,
- MacroAssembler::Label done,
- StructureStubInfo* stubInfo)
- : m_structureImm(structureImm)
- , m_structureCheck(structureCheck)
- , m_propertyStorageLoad(propertyStorageLoad)
-#if USE(JSVALUE64)
- , m_loadOrStore(loadOrStore)
-#elif USE(JSVALUE32_64)
- , m_tagLoadOrStore(tagLoadOrStore)
- , m_payloadLoadOrStore(payloadLoadOrStore)
-#endif
- , m_slowPathGenerator(slowPathGenerator)
- , m_done(done)
- , m_stubInfo(stubInfo)
- {
- }
-
- MacroAssembler::DataLabelPtr m_structureImm;
- MacroAssembler::PatchableJump m_structureCheck;
- MacroAssembler::ConvertibleLoadLabel m_propertyStorageLoad;
-#if USE(JSVALUE64)
- MacroAssembler::DataLabelCompact m_loadOrStore;
-#elif USE(JSVALUE32_64)
- MacroAssembler::DataLabelCompact m_tagLoadOrStore;
- MacroAssembler::DataLabelCompact m_payloadLoadOrStore;
-#endif
- SlowPathGenerator* m_slowPathGenerator;
- MacroAssembler::Label m_done;
- StructureStubInfo* m_stubInfo;
-};
-
struct InRecord {
InRecord(
MacroAssembler::PatchableJump jump, SlowPathGenerator* slowPathGenerator,
}
#endif
- void addPropertyAccess(const PropertyAccessRecord& record)
+ void addGetById(const JITGetByIdGenerator& gen, SlowPathGenerator* slowPath)
{
- m_propertyAccesses.append(record);
+ m_getByIds.append(InlineCacheWrapper<JITGetByIdGenerator>(gen, slowPath));
}
+ void addPutById(const JITPutByIdGenerator& gen, SlowPathGenerator* slowPath)
+ {
+ m_putByIds.append(InlineCacheWrapper<JITPutByIdGenerator>(gen, slowPath));
+ }
+
void addIn(const InRecord& record)
{
m_ins.append(record);
CodeOrigin m_codeOrigin;
};
- Vector<PropertyAccessRecord, 4> m_propertyAccesses;
+ Vector<InlineCacheWrapper<JITGetByIdGenerator>, 4> m_getByIds;
+ Vector<InlineCacheWrapper<JITPutByIdGenerator>, 4> m_putByIds;
Vector<InRecord, 4> m_ins;
Vector<JSCallRecord, 4> m_jsCalls;
Vector<OSRExitCompilationInfo> m_exitCompilationInfo;
void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
- StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
+ JITGetByIdGenerator gen(
+ m_jit.codeBlock(), codeOrigin, usedRegisters(),
+ JSValueRegs(baseTagGPROrNone, basePayloadGPR),
+ JSValueRegs(resultTagGPR, resultPayloadGPR), spillMode != NeedToSpill);
- JITCompiler::DataLabelPtr structureToCompare;
- JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
+ gen.generateFastPath(m_jit);
- JITCompiler::ConvertibleLoadLabel propertyStorageLoad = m_jit.convertibleLoadPtr(JITCompiler::Address(basePayloadGPR, JSObject::butterflyOffset()), resultPayloadGPR);
- JITCompiler::DataLabelCompact tagLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- JITCompiler::DataLabelCompact payloadLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
+ JITCompiler::JumpList slowCases;
+ if (slowPathTarget.isSet())
+ slowCases.append(slowPathTarget);
+ slowCases.append(gen.slowPathJump());
- JITCompiler::Label doneLabel = m_jit.label();
-
OwnPtr<SlowPathGenerator> slowPath;
if (baseTagGPROrNone == InvalidGPRReg) {
- if (!slowPathTarget.isSet()) {
- slowPath = slowPathCall(
- structureCheck.m_jump, this, operationGetByIdOptimize,
- JSValueRegs(resultTagGPR, resultPayloadGPR), stubInfo,
- static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
- identifierUID(identifierNumber));
- } else {
- JITCompiler::JumpList slowCases;
- slowCases.append(structureCheck.m_jump);
- slowCases.append(slowPathTarget);
- slowPath = slowPathCall(
- slowCases, this, operationGetByIdOptimize,
- JSValueRegs(resultTagGPR, resultPayloadGPR), stubInfo,
- static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
- identifierUID(identifierNumber));
- }
+ slowPath = slowPathCall(
+ slowCases, this, operationGetByIdOptimize,
+ JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(),
+ static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
+ identifierUID(identifierNumber));
} else {
- if (!slowPathTarget.isSet()) {
- slowPath = slowPathCall(
- structureCheck.m_jump, this, operationGetByIdOptimize,
- JSValueRegs(resultTagGPR, resultPayloadGPR), stubInfo, baseTagGPROrNone,
- basePayloadGPR, identifierUID(identifierNumber));
- } else {
- JITCompiler::JumpList slowCases;
- slowCases.append(structureCheck.m_jump);
- slowCases.append(slowPathTarget);
- slowPath = slowPathCall(
- slowCases, this, operationGetByIdOptimize,
- JSValueRegs(resultTagGPR, resultPayloadGPR), stubInfo, baseTagGPROrNone,
- basePayloadGPR, identifierUID(identifierNumber));
- }
+ slowPath = slowPathCall(
+ slowCases, this, operationGetByIdOptimize,
+ JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(), baseTagGPROrNone,
+ basePayloadGPR, identifierUID(identifierNumber));
}
- RegisterSet currentlyUsedRegisters = usedRegisters();
- ASSERT(currentlyUsedRegisters.get(basePayloadGPR));
- ASSERT(currentlyUsedRegisters.get(resultTagGPR));
- ASSERT(currentlyUsedRegisters.get(resultPayloadGPR));
- stubInfo->codeOrigin = codeOrigin;
- stubInfo->patch.baseGPR = static_cast<int8_t>(basePayloadGPR);
- stubInfo->patch.valueGPR = static_cast<int8_t>(resultPayloadGPR);
- stubInfo->patch.valueTagGPR = static_cast<int8_t>(resultTagGPR);
- stubInfo->patch.usedRegisters = currentlyUsedRegisters;
- stubInfo->patch.registersFlushed = spillMode != NeedToSpill;
-
- m_jit.addPropertyAccess(
- PropertyAccessRecord(
- structureToCompare, structureCheck, propertyStorageLoad,
- tagLoadWithPatch, payloadLoadWithPatch, slowPath.get(), doneLabel,
- stubInfo));
+ m_jit.addGetById(gen, slowPath.get());
addSlowPathGenerator(slowPath.release());
}
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
{
- StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
-
- JITCompiler::DataLabelPtr structureToCompare;
- JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
-
writeBarrier(basePayloadGPR, valueTagGPR, valueUse, WriteBarrierForPropertyAccess, scratchGPR);
+
+ JITPutByIdGenerator gen(
+ m_jit.codeBlock(), codeOrigin, usedRegisters(), JSValueRegs::payloadOnly(basePayloadGPR),
+ JSValueRegs(valueTagGPR, valuePayloadGPR), scratchGPR, false,
+ m_jit.ecmaModeFor(codeOrigin), putKind);
+
+ gen.generateFastPath(m_jit);
+
+ JITCompiler::JumpList slowCases;
+ if (slowPathTarget.isSet())
+ slowCases.append(slowPathTarget);
+ slowCases.append(gen.slowPathJump());
- JITCompiler::ConvertibleLoadLabel propertyStorageLoad = m_jit.convertibleLoadPtr(JITCompiler::Address(basePayloadGPR, JSObject::butterflyOffset()), scratchGPR);
- JITCompiler::DataLabel32 tagStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valueTagGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
- JITCompiler::DataLabel32 payloadStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valuePayloadGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
+ slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), valueTagGPR,
+ valuePayloadGPR, basePayloadGPR, identifierUID(identifierNumber));
- JITCompiler::Label doneLabel = m_jit.label();
- V_JITOperation_ESsiJJI optimizedCall;
- if (m_jit.strictModeFor(m_currentNode->codeOrigin)) {
- if (putKind == Direct)
- optimizedCall = operationPutByIdDirectStrictOptimize;
- else
- optimizedCall = operationPutByIdStrictOptimize;
- } else {
- if (putKind == Direct)
- optimizedCall = operationPutByIdDirectNonStrictOptimize;
- else
- optimizedCall = operationPutByIdNonStrictOptimize;
- }
- OwnPtr<SlowPathGenerator> slowPath;
- if (!slowPathTarget.isSet()) {
- slowPath = slowPathCall(
- structureCheck.m_jump, this, optimizedCall, NoResult, stubInfo, valueTagGPR,
- valuePayloadGPR, basePayloadGPR, identifierUID(identifierNumber));
- } else {
- JITCompiler::JumpList slowCases;
- slowCases.append(structureCheck.m_jump);
- slowCases.append(slowPathTarget);
- slowPath = slowPathCall(
- slowCases, this, optimizedCall, NoResult, stubInfo, valueTagGPR, valuePayloadGPR,
- basePayloadGPR, identifierUID(identifierNumber));
- }
- RegisterSet currentlyUsedRegisters = usedRegisters();
- currentlyUsedRegisters.clear(scratchGPR);
- ASSERT(currentlyUsedRegisters.get(basePayloadGPR));
- ASSERT(currentlyUsedRegisters.get(valueTagGPR));
- ASSERT(currentlyUsedRegisters.get(valuePayloadGPR));
-
- stubInfo->codeOrigin = codeOrigin;
- stubInfo->patch.baseGPR = static_cast<int8_t>(basePayloadGPR);
- stubInfo->patch.valueGPR = static_cast<int8_t>(valuePayloadGPR);
- stubInfo->patch.valueTagGPR = static_cast<int8_t>(valueTagGPR);
- stubInfo->patch.usedRegisters = currentlyUsedRegisters;
- stubInfo->patch.registersFlushed = false;
-
- m_jit.addPropertyAccess(
- PropertyAccessRecord(
- structureToCompare, structureCheck, propertyStorageLoad,
- JITCompiler::DataLabelCompact(tagStoreWithPatch.label()),
- JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()),
- slowPath.get(), doneLabel, stubInfo));
+ m_jit.addPutById(gen, slowPath.get());
addSlowPathGenerator(slowPath.release());
}
void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
- StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
+ JITGetByIdGenerator gen(
+ m_jit.codeBlock(), codeOrigin, usedRegisters(), JSValueRegs(baseGPR),
+ JSValueRegs(resultGPR), spillMode != NeedToSpill);
+ gen.generateFastPath(m_jit);
- JITCompiler::DataLabelPtr structureToCompare;
- JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
+ JITCompiler::JumpList slowCases;
+ if (slowPathTarget.isSet())
+ slowCases.append(slowPathTarget);
+ slowCases.append(gen.slowPathJump());
- JITCompiler::ConvertibleLoadLabel propertyStorageLoad =
- m_jit.convertibleLoadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
- JITCompiler::DataLabelCompact loadWithPatch = m_jit.load64WithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR, 0), resultGPR);
+ OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
+ slowCases, this, operationGetByIdOptimize, resultGPR, gen.stubInfo(), baseGPR,
+ identifierUID(identifierNumber), spillMode);
- JITCompiler::Label doneLabel = m_jit.label();
-
- OwnPtr<SlowPathGenerator> slowPath;
- if (!slowPathTarget.isSet()) {
- slowPath = slowPathCall(
- structureCheck.m_jump, this, operationGetByIdOptimize, resultGPR, stubInfo, baseGPR,
- identifierUID(identifierNumber), spillMode);
- } else {
- JITCompiler::JumpList slowCases;
- slowCases.append(structureCheck.m_jump);
- slowCases.append(slowPathTarget);
- slowPath = slowPathCall(
- slowCases, this, operationGetByIdOptimize, resultGPR, stubInfo, baseGPR,
- identifierUID(identifierNumber), spillMode);
- }
- RegisterSet currentlyUsedRegisters = usedRegisters();
- ASSERT(currentlyUsedRegisters.get(baseGPR));
- ASSERT(currentlyUsedRegisters.get(resultGPR));
-
- stubInfo->codeOrigin = codeOrigin;
- stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
- stubInfo->patch.valueGPR = static_cast<int8_t>(resultGPR);
- stubInfo->patch.usedRegisters = currentlyUsedRegisters;
- stubInfo->patch.registersFlushed = spillMode != NeedToSpill;
-
- m_jit.addPropertyAccess(
- PropertyAccessRecord(
- structureToCompare, structureCheck, propertyStorageLoad, loadWithPatch,
- slowPath.get(), doneLabel, stubInfo));
+ m_jit.addGetById(gen, slowPath.get());
addSlowPathGenerator(slowPath.release());
}
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
{
- StructureStubInfo* stubInfo = m_jit.codeBlock()->addStubInfo();
-
- JITCompiler::DataLabelPtr structureToCompare;
- JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
-
writeBarrier(baseGPR, valueGPR, valueUse, WriteBarrierForPropertyAccess, scratchGPR);
- JITCompiler::ConvertibleLoadLabel propertyStorageLoad =
- m_jit.convertibleLoadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
- JITCompiler::DataLabel32 storeWithPatch = m_jit.store64WithAddressOffsetPatch(valueGPR, JITCompiler::Address(scratchGPR, 0));
-
- JITCompiler::Label doneLabel = m_jit.label();
+ JITPutByIdGenerator gen(
+ m_jit.codeBlock(), codeOrigin, usedRegisters(), JSValueRegs(baseGPR),
+ JSValueRegs(valueGPR), scratchGPR, false, m_jit.ecmaModeFor(codeOrigin), putKind);
+ gen.generateFastPath(m_jit);
- V_JITOperation_ESsiJJI optimizedCall;
- if (m_jit.strictModeFor(m_currentNode->codeOrigin)) {
- if (putKind == Direct)
- optimizedCall = operationPutByIdDirectStrictOptimize;
- else
- optimizedCall = operationPutByIdStrictOptimize;
- } else {
- if (putKind == Direct)
- optimizedCall = operationPutByIdDirectNonStrictOptimize;
- else
- optimizedCall = operationPutByIdNonStrictOptimize;
- }
- OwnPtr<SlowPathGenerator> slowPath;
- if (!slowPathTarget.isSet()) {
- slowPath = slowPathCall(
- structureCheck.m_jump, this, optimizedCall, NoResult, stubInfo, valueGPR, baseGPR,
- identifierUID(identifierNumber));
- } else {
- JITCompiler::JumpList slowCases;
- slowCases.append(structureCheck.m_jump);
+ JITCompiler::JumpList slowCases;
+ if (slowPathTarget.isSet())
slowCases.append(slowPathTarget);
- slowPath = slowPathCall(
- slowCases, this, optimizedCall, NoResult, stubInfo, valueGPR, baseGPR,
- identifierUID(identifierNumber));
- }
- RegisterSet currentlyUsedRegisters = usedRegisters();
- currentlyUsedRegisters.clear(scratchGPR);
- ASSERT(currentlyUsedRegisters.get(baseGPR));
- ASSERT(currentlyUsedRegisters.get(valueGPR));
-
- stubInfo->codeOrigin = codeOrigin;
- stubInfo->patch.baseGPR = static_cast<int8_t>(baseGPR);
- stubInfo->patch.valueGPR = static_cast<int8_t>(valueGPR);
- stubInfo->patch.usedRegisters = currentlyUsedRegisters;
- stubInfo->patch.registersFlushed = false;
-
- m_jit.addPropertyAccess(
- PropertyAccessRecord(
- structureToCompare, structureCheck, propertyStorageLoad,
- JITCompiler::DataLabelCompact(storeWithPatch.label()), slowPath.get(), doneLabel,
- stubInfo));
+ slowCases.append(gen.slowPathJump());
+
+ OwnPtr<SlowPathGenerator> slowPath = slowPathCall(
+ slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), valueGPR, baseGPR,
+ identifierUID(identifierNumber));
+
+ m_jit.addPutById(gen, slowPath.get());
addSlowPathGenerator(slowPath.release());
}
GPRReg arg3GPR = arg3.gpr();
flushRegisters();
if (node->op() == PutByValDirect)
- callOperation(m_jit.strictModeFor(node->codeOrigin) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict, arg1GPR, arg2GPR, arg3GPR);
+ callOperation(m_jit.isStrictModeFor(node->codeOrigin) ? operationPutByValDirectStrict : operationPutByValDirectNonStrict, arg1GPR, arg2GPR, arg3GPR);
else
- callOperation(m_jit.strictModeFor(node->codeOrigin) ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR);
+ callOperation(m_jit.isStrictModeFor(node->codeOrigin) ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR);
noResult(node);
alreadyHandled = true;
return codeBlock()->globalObjectFor(codeOrigin);
}
- bool strictModeFor(CodeOrigin codeOrigin)
+ bool isStrictModeFor(CodeOrigin codeOrigin)
{
if (!codeOrigin.inlineCallFrame)
return codeBlock()->isStrictMode();
return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
}
+ ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
+ {
+ return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
+ }
+
ExecutableBase* executableFor(const CodeOrigin& codeOrigin);
CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
bool operator!() const { return m_gpr == InvalidGPRReg; }
GPRReg gpr() const { return m_gpr; }
+ GPRReg tagGPR() const { return InvalidGPRReg; }
GPRReg payloadGPR() const { return m_gpr; }
private:
, m_interpreter(vm->interpreter)
, m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
, m_bytecodeOffset((unsigned)-1)
- , m_propertyAccessInstructionIndex(UINT_MAX)
+ , m_getByIdIndex(UINT_MAX)
+ , m_putByIdIndex(UINT_MAX)
, m_byValInstructionIndex(UINT_MAX)
, m_callLinkInfoIndex(UINT_MAX)
#if USE(JSVALUE32_64)
{
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
- m_propertyAccessInstructionIndex = 0;
+ m_getByIdIndex = 0;
+ m_putByIdIndex = 0;
m_byValInstructionIndex = 0;
m_callLinkInfoIndex = 0;
emitJumpSlowToHot(jump(), 0);
}
- RELEASE_ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
+ RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
+ RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#if ENABLE(VALUE_PROFILER)
RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#endif
}
-ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(LinkBuffer &linkBuffer)
-{
- ASSERT(bytecodeIndex != std::numeric_limits<unsigned>::max());
- stubInfo->codeOrigin = CodeOrigin(bytecodeIndex);
- stubInfo->callReturnLocation = linkBuffer.locationOf(callReturnLocation);
-
- stubInfo->patch.deltaCheckImmToCall = MacroAssembler::differenceBetweenCodePtr(linkBuffer.locationOf(structureToCompare), stubInfo->callReturnLocation);
- stubInfo->patch.deltaCallToStructCheck = MacroAssembler::differenceBetweenCodePtr(stubInfo->callReturnLocation, linkBuffer.locationOf(structureCheck));
-
- stubInfo->patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(stubInfo->callReturnLocation, linkBuffer.locationOf(coldPathBegin));
- stubInfo->patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(stubInfo->callReturnLocation, linkBuffer.locationOf(done));
- stubInfo->patch.deltaCallToStorageLoad = MacroAssembler::differenceBetweenCodePtr(stubInfo->callReturnLocation, linkBuffer.locationOf(propertyStorageLoad));
-
- stubInfo->patch.baseGPR = GPRInfo::regT0;
-
- RegisterSet usedRegisters;
- usedRegisters.set(GPRInfo::regT0);
-
-#if USE(JSVALUE64) // JSVALUE cases
- switch (m_type) {
- case GetById:
- stubInfo->patch.deltaCallToLoadOrStore = MacroAssembler::differenceBetweenCodePtr(stubInfo->callReturnLocation, linkBuffer.locationOf(getDisplacementLabel));
- stubInfo->patch.valueGPR = GPRInfo::regT0;
- break;
- case PutById:
- stubInfo->patch.deltaCallToLoadOrStore = MacroAssembler::differenceBetweenCodePtr(stubInfo->callReturnLocation, linkBuffer.locationOf(putDisplacementLabel));
- stubInfo->patch.valueGPR = GPRInfo::regT1;
- usedRegisters.set(GPRInfo::regT1);
- break;
- }
-#else // JSVALUE cases
- switch (m_type) {
- case GetById:
- stubInfo->patch.deltaCallToTagLoadOrStore = MacroAssembler::differenceBetweenCodePtr(stubInfo->callReturnLocation, linkBuffer.locationOf(getDisplacementLabel2));
- stubInfo->patch.deltaCallToPayloadLoadOrStore = MacroAssembler::differenceBetweenCodePtr(stubInfo->callReturnLocation, linkBuffer.locationOf(getDisplacementLabel1));
- stubInfo->patch.valueGPR = GPRInfo::regT0;
- stubInfo->patch.valueTagGPR = GPRInfo::regT1;
- usedRegisters.set(GPRInfo::regT1);
- break;
- case PutById:
- stubInfo->patch.deltaCallToTagLoadOrStore = MacroAssembler::differenceBetweenCodePtr(stubInfo->callReturnLocation, linkBuffer.locationOf(putDisplacementLabel2));
- stubInfo->patch.deltaCallToPayloadLoadOrStore = MacroAssembler::differenceBetweenCodePtr(stubInfo->callReturnLocation, linkBuffer.locationOf(putDisplacementLabel1));
- stubInfo->patch.valueGPR = GPRInfo::regT2;
- stubInfo->patch.valueTagGPR = GPRInfo::regT3;
- usedRegisters.set(GPRInfo::regT2);
- usedRegisters.set(GPRInfo::regT3);
- break;
- }
-#endif // JSVALUE cases
-
- stubInfo->patch.usedRegisters = usedRegisters;
- stubInfo->patch.registersFlushed = true;
-}
-
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
#if ENABLE(VALUE_PROFILER)
patchBuffer.link(iter->from, FunctionPtr(iter->to));
}
- for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i)
- m_propertyAccessCompilationInfo[i].copyToStubInfo(patchBuffer);
+ for (unsigned i = m_getByIds.size(); i--;)
+ m_getByIds[i].finalize(patchBuffer);
+ for (unsigned i = m_putByIds.size(); i--;)
+ m_putByIds[i].finalize(patchBuffer);
+
m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
#include "CompactJITCodeMap.h"
#include "Interpreter.h"
#include "JITDisassembler.h"
+#include "JITInlineCacheGenerator.h"
#include "JSInterfaceJIT.h"
#include "LegacyProfiler.h"
#include "Opcode.h"
}
};
- enum PropertyStubGetById_T { PropertyStubGetById };
- enum PropertyStubPutById_T { PropertyStubPutById };
-
- struct PropertyStubCompilationInfo {
- enum Type { GetById, PutById } m_type;
-
- unsigned bytecodeIndex;
- MacroAssembler::Call callReturnLocation;
- MacroAssembler::Label hotPathBegin;
- MacroAssembler::DataLabelPtr structureToCompare;
- MacroAssembler::PatchableJump structureCheck;
- MacroAssembler::ConvertibleLoadLabel propertyStorageLoad;
-#if USE(JSVALUE64)
- MacroAssembler::DataLabelCompact getDisplacementLabel;
-#else
- MacroAssembler::DataLabelCompact getDisplacementLabel1;
- MacroAssembler::DataLabelCompact getDisplacementLabel2;
-#endif
- MacroAssembler::Label done;
- MacroAssembler::Label coldPathBegin;
-#if USE(JSVALUE64)
- MacroAssembler::DataLabel32 putDisplacementLabel;
-#else
- MacroAssembler::DataLabel32 putDisplacementLabel1;
- MacroAssembler::DataLabel32 putDisplacementLabel2;
-#endif
- StructureStubInfo* stubInfo;
-
-#if !ASSERT_DISABLED
- PropertyStubCompilationInfo()
- : bytecodeIndex(std::numeric_limits<unsigned>::max())
- {
- }
-#endif
-
-
- PropertyStubCompilationInfo(
- PropertyStubGetById_T, unsigned bytecodeIndex,
- MacroAssembler::DataLabelPtr structureToCompare,
- MacroAssembler::PatchableJump structureCheck,
- MacroAssembler::ConvertibleLoadLabel propertyStorageLoad,
-#if USE(JSVALUE64)
- MacroAssembler::DataLabelCompact displacementLabel,
-#else
- MacroAssembler::DataLabelCompact displacementLabel1,
- MacroAssembler::DataLabelCompact displacementLabel2,
-#endif
- MacroAssembler::Label done)
- : m_type(GetById)
- , bytecodeIndex(bytecodeIndex)
- , structureToCompare(structureToCompare)
- , structureCheck(structureCheck)
- , propertyStorageLoad(propertyStorageLoad)
-#if USE(JSVALUE64)
- , getDisplacementLabel(displacementLabel)
-#else
- , getDisplacementLabel1(displacementLabel1)
- , getDisplacementLabel2(displacementLabel2)
-#endif
- , done(done)
- {
- }
-
- PropertyStubCompilationInfo(
- PropertyStubPutById_T, unsigned bytecodeIndex,
- MacroAssembler::DataLabelPtr structureToCompare,
- MacroAssembler::PatchableJump structureCheck,
- MacroAssembler::ConvertibleLoadLabel propertyStorageLoad,
-#if USE(JSVALUE64)
- MacroAssembler::DataLabel32 displacementLabel,
-#else
- MacroAssembler::DataLabel32 displacementLabel1,
- MacroAssembler::DataLabel32 displacementLabel2,
-#endif
- MacroAssembler::Label done)
- : m_type(PutById)
- , bytecodeIndex(bytecodeIndex)
- , structureToCompare(structureToCompare)
- , structureCheck(structureCheck)
- , propertyStorageLoad(propertyStorageLoad)
- , done(done)
-#if USE(JSVALUE64)
- , putDisplacementLabel(displacementLabel)
-#else
- , putDisplacementLabel1(displacementLabel1)
- , putDisplacementLabel2(displacementLabel2)
-#endif
- {
- }
-
- void slowCaseInfo(MacroAssembler::Label coldPathBegin, MacroAssembler::Call call, StructureStubInfo* info)
- {
- callReturnLocation = call;
- this->coldPathBegin = coldPathBegin;
- stubInfo = info;
- }
-
- void slowCaseInfo(MacroAssembler::Call call)
- {
- callReturnLocation = call;
- }
-
- void copyToStubInfo(LinkBuffer &patchBuffer);
- };
-
struct ByValCompilationInfo {
ByValCompilationInfo() { }
class JIT : private JSInterfaceJIT {
friend class JITSlowPathCall;
friend class JITStubCall;
- friend struct PropertyStubCompilationInfo;
using MacroAssembler::Jump;
using MacroAssembler::JumpList;
Vector<CallRecord> m_calls;
Vector<Label> m_labels;
- Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo;
+ Vector<JITGetByIdGenerator> m_getByIds;
+ Vector<JITPutByIdGenerator> m_putByIds;
Vector<ByValCompilationInfo> m_byValCompilationInfo;
Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
Vector<JumpTable> m_jmpTable;
JumpList m_exceptionChecks;
JumpList m_exceptionChecksWithCallFrameRollback;
- unsigned m_propertyAccessInstructionIndex;
+ unsigned m_getByIdIndex;
+ unsigned m_putByIdIndex;
unsigned m_byValInstructionIndex;
unsigned m_callLinkInfoIndex;
--- /dev/null
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITInlineCacheGenerator.h"
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "LinkBuffer.h"
+#include "Operations.h"
+
+namespace JSC {
+
+JITInlineCacheGenerator::JITInlineCacheGenerator(CodeBlock* codeBlock, CodeOrigin codeOrigin)
+ : m_codeBlock(codeBlock)
+{
+ m_stubInfo = m_codeBlock->addStubInfo();
+ m_stubInfo->codeOrigin = codeOrigin;
+}
+
+JITByIdGenerator::JITByIdGenerator(
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters,
+ JSValueRegs base, JSValueRegs value, bool registersFlushed)
+ : JITInlineCacheGenerator(codeBlock, codeOrigin)
+ , m_base(base)
+ , m_value(value)
+{
+ m_stubInfo->patch.registersFlushed = registersFlushed;
+ m_stubInfo->patch.usedRegisters = usedRegisters;
+
+ // This is a convenience - in cases where the only registers you're using are base/value,
+ // it allows you to pass RegisterSet() as the usedRegisters argument.
+ m_stubInfo->patch.usedRegisters.set(base);
+ m_stubInfo->patch.usedRegisters.set(value);
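+ // For example, the baseline call sites in this patch pass RegisterSet()
+ // and rely on base and value being added here.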
+
+ m_stubInfo->patch.baseGPR = static_cast<int8_t>(base.payloadGPR());
+ m_stubInfo->patch.valueGPR = static_cast<int8_t>(value.payloadGPR());
+#if USE(JSVALUE32_64)
+ m_stubInfo->patch.valueTagGPR = static_cast<int8_t>(value.tagGPR());
+#endif
+}
+
+void JITByIdGenerator::finalize(LinkBuffer& linkBuffer)
+{
+ CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_call);
+ m_stubInfo->callReturnLocation = callReturnLocation;
+ m_stubInfo->patch.deltaCheckImmToCall = MacroAssembler::differenceBetweenCodePtr(
+ linkBuffer.locationOf(m_structureImm), callReturnLocation);
+ m_stubInfo->patch.deltaCallToStructCheck = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, linkBuffer.locationOf(m_structureCheck));
+#if USE(JSVALUE64)
+ m_stubInfo->patch.deltaCallToLoadOrStore = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, linkBuffer.locationOf(m_loadOrStore));
+#else
+ m_stubInfo->patch.deltaCallToTagLoadOrStore = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, linkBuffer.locationOf(m_tagLoadOrStore));
+ m_stubInfo->patch.deltaCallToPayloadLoadOrStore = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, linkBuffer.locationOf(m_loadOrStore));
+#endif
+ m_stubInfo->patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, linkBuffer.locationOf(m_slowPathBegin));
+ m_stubInfo->patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, linkBuffer.locationOf(m_done));
+ m_stubInfo->patch.deltaCallToStorageLoad = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, linkBuffer.locationOf(m_propertyStorageLoad));
+}
+
+void JITByIdGenerator::generateFastPathChecks(MacroAssembler& jit, GPRReg butterfly)
+{
+ m_structureCheck = jit.patchableBranchPtrWithPatch(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(m_base.payloadGPR(), JSCell::structureOffset()),
+ m_structureImm, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
+
+ m_propertyStorageLoad = jit.convertibleLoadPtr(
+ MacroAssembler::Address(m_base.payloadGPR(), JSObject::butterflyOffset()), butterfly);
+}
+
+void JITGetByIdGenerator::generateFastPath(MacroAssembler& jit)
+{
+ generateFastPathChecks(jit, m_value.payloadGPR());
+
+#if USE(JSVALUE64)
+ m_loadOrStore = jit.load64WithCompactAddressOffsetPatch(
+ MacroAssembler::Address(m_value.payloadGPR(), 0), m_value.payloadGPR()).label();
+#else
+ m_tagLoadOrStore = jit.load32WithCompactAddressOffsetPatch(
+ MacroAssembler::Address(m_value.payloadGPR(), 0), m_value.tagGPR()).label();
+ m_loadOrStore = jit.load32WithCompactAddressOffsetPatch(
+ MacroAssembler::Address(m_value.payloadGPR(), 0), m_value.payloadGPR()).label();
+#endif
+
+ m_done = jit.label();
+}
+
+JITPutByIdGenerator::JITPutByIdGenerator(
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters,
+ JSValueRegs base, JSValueRegs value, GPRReg scratch, bool registersFlushed,
+ ECMAMode ecmaMode, PutKind putKind)
+ : JITByIdGenerator(codeBlock, codeOrigin, usedRegisters, base, value, registersFlushed)
+ , m_scratch(scratch)
+ , m_ecmaMode(ecmaMode)
+ , m_putKind(putKind)
+{
+ m_stubInfo->patch.usedRegisters.clear(scratch);
+}
+
+void JITPutByIdGenerator::generateFastPath(MacroAssembler& jit)
+{
+ generateFastPathChecks(jit, m_scratch);
+
+#if USE(JSVALUE64)
+ m_loadOrStore = jit.store64WithAddressOffsetPatch(
+ m_value.payloadGPR(), MacroAssembler::Address(m_scratch, 0)).label();
+#else
+ m_tagLoadOrStore = jit.store32WithAddressOffsetPatch(
+ m_value.tagGPR(), MacroAssembler::Address(m_scratch, 0)).label();
+ m_loadOrStore = jit.store32WithAddressOffsetPatch(
+ m_value.payloadGPR(), MacroAssembler::Address(m_scratch, 0)).label();
+#endif
+
+ m_done = jit.label();
+}
+
+V_JITOperation_ESsiJJI JITPutByIdGenerator::slowPathFunction()
+{
+ if (m_ecmaMode == StrictMode) {
+ if (m_putKind == Direct)
+ return operationPutByIdDirectStrictOptimize;
+ return operationPutByIdStrictOptimize;
+ }
+ if (m_putKind == Direct)
+ return operationPutByIdDirectNonStrictOptimize;
+ return operationPutByIdNonStrictOptimize;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
--- /dev/null
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITInlineCacheGenerator_h
+#define JITInlineCacheGenerator_h
+
+#if ENABLE(JIT)
+
+#include "CodeOrigin.h"
+#include "JITOperations.h"
+#include "JSCJSValue.h"
+#include "PutKind.h"
+
+namespace JSC {
+
+class CodeBlock;
+
+class JITInlineCacheGenerator {
+protected:
+ JITInlineCacheGenerator() { }
+ JITInlineCacheGenerator(CodeBlock*, CodeOrigin);
+
+public:
+ StructureStubInfo* stubInfo() const { return m_stubInfo; }
+
+protected:
+ CodeBlock* m_codeBlock;
+ StructureStubInfo* m_stubInfo;
+};
+
+class JITByIdGenerator : public JITInlineCacheGenerator {
+protected:
+ JITByIdGenerator() { }
+
+ JITByIdGenerator(
+ CodeBlock*, CodeOrigin, const RegisterSet&, JSValueRegs base, JSValueRegs value,
+ bool registersFlushed);
+
+public:
+ void reportSlowPathCall(MacroAssembler::Label slowPathBegin, MacroAssembler::Call call)
+ {
+ m_slowPathBegin = slowPathBegin;
+ m_call = call;
+ }
+
+ MacroAssembler::Jump slowPathJump() const { return m_structureCheck.m_jump; }
+
+ void finalize(LinkBuffer&);
+
+protected:
+ void generateFastPathChecks(MacroAssembler&, GPRReg butterfly);
+
+ JSValueRegs m_base;
+ JSValueRegs m_value;
+
+ MacroAssembler::DataLabelPtr m_structureImm;
+ MacroAssembler::PatchableJump m_structureCheck;
+ MacroAssembler::ConvertibleLoadLabel m_propertyStorageLoad;
+ AssemblerLabel m_loadOrStore;
+#if USE(JSVALUE32_64)
+ AssemblerLabel m_tagLoadOrStore;
+#endif
+ MacroAssembler::Label m_done;
+ MacroAssembler::Label m_slowPathBegin;
+ MacroAssembler::Call m_call;
+};
+
+class JITGetByIdGenerator : public JITByIdGenerator {
+public:
+ JITGetByIdGenerator() { }
+
+ JITGetByIdGenerator(
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters,
+ JSValueRegs base, JSValueRegs value, bool registersFlushed)
+ : JITByIdGenerator(codeBlock, codeOrigin, usedRegisters, base, value, registersFlushed)
+ {
+ }
+
+ void generateFastPath(MacroAssembler&);
+};
+
+class JITPutByIdGenerator : public JITByIdGenerator {
+public:
+ JITPutByIdGenerator() { }
+
+ JITPutByIdGenerator(
+ CodeBlock*, CodeOrigin, const RegisterSet& usedRegisters, JSValueRegs base,
+ JSValueRegs value, GPRReg scratch, bool registersFlushed, ECMAMode, PutKind);
+
+ void generateFastPath(MacroAssembler&);
+
+ V_JITOperation_ESsiJJI slowPathFunction();
+
+private:
+ GPRReg m_scratch;
+ ECMAMode m_ecmaMode;
+ PutKind m_putKind;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITInlineCacheGenerator_h
+
const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
emitGetVirtualRegister(baseVReg, regT0);
- compileGetByIdHotPath(baseVReg, ident);
- emitValueProfilingSite(regT4);
- emitPutVirtualRegister(resultVReg);
-}
-
-void JIT::compileGetByIdHotPath(int baseVReg, const Identifier* ident)
-{
- // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
- // Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump
- // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
- // to jump back to if one of these trampolies finds a match.
-
+
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
emitArrayProfilingSiteForBytecodeIndex(regT1, regT2, m_bytecodeOffset);
}
- DataLabelPtr structureToCompare;
- PatchableJump structureCheck = patchableBranchPtrWithPatch(
- NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare,
- TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
-
- addSlowCase(structureCheck);
-
- ConvertibleLoadLabel propertyStorageLoad =
- convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
-
- DataLabelCompact loadWithPatch = load64WithCompactAddressOffsetPatch(
- Address(regT0, patchGetByIdDefaultOffset), regT0);
-
- Label done = label();
-
- m_propertyAccessCompilationInfo.append(
- PropertyStubCompilationInfo(
- PropertyStubGetById, m_bytecodeOffset, structureToCompare, structureCheck,
- propertyStorageLoad, loadWithPatch, done));
+ JITGetByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet(),
+ JSValueRegs(regT0), JSValueRegs(regT0), true);
+ gen.generateFastPath(*this);
+ addSlowCase(gen.slowPathJump());
+ m_getByIds.append(gen);
+
+ emitValueProfilingSite(regT4);
+ emitPutVirtualRegister(resultVReg);
}
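The inline sequence deleted above gives a good picture of what JITGetByIdGenerator::generateFastPath is expected to emit on 64-bit platforms. Roughly, with member names following the new header (treat this as a sketch reconstructed from the deleted code, not as the patch's implementation):

    // Structure check against a patchable immediate, seeded with unusedPointer.
    m_structureCheck = jit.patchableBranchPtrWithPatch(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(baseGPR, JSCell::structureOffset()),
        m_structureImm, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
    // Butterfly load that the IC machinery can later convert or bypass.
    m_propertyStorageLoad = jit.convertibleLoadPtr(
        MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
    // Load with a compact patchable offset, starting at patchGetByIdDefaultOffset.
    jit.load64WithCompactAddressOffsetPatch(
        MacroAssembler::Address(resultGPR, patchGetByIdDefaultOffset), resultGPR);
    m_done = jit.label();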
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
-    Label coldPathBegin = label();
-    StructureStubInfo* stubInfo = m_codeBlock->addStubInfo();
-    Call call = callOperation(WithProfile, operationGetByIdOptimize, resultVReg, stubInfo, regT0, ident->impl());
-    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(coldPathBegin, call, stubInfo);
+    JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];
+    Label coldPathBegin = label();
+    Call call = callOperation(WithProfile, operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());
+    gen.reportSlowPathCall(coldPathBegin, call);
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
int baseVReg = currentInstruction[1].u.operand;
int valueVReg = currentInstruction[3].u.operand;
+ unsigned direct = currentInstruction[8].u.operand;
// In order to be able to patch both the Structure, and the object offset, we store one pointer,
// to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
// Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
-
- DataLabelPtr structureToCompare;
- PatchableJump structureCheck = patchableBranchPtrWithPatch(
- NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare,
- TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
-
- addSlowCase(structureCheck);
emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
-    ConvertibleLoadLabel propertyStorageLoad =
-        convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
-
-    DataLabel32 storeWithPatch =
-        store64WithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));
-
-    Label done = label();
-
-    m_propertyAccessCompilationInfo.append(
-        PropertyStubCompilationInfo(
-            PropertyStubPutById, m_bytecodeOffset, structureToCompare, structureCheck,
-            propertyStorageLoad, storeWithPatch, done));
+    JITPutByIdGenerator gen(
+        m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet(), JSValueRegs(regT0),
+        JSValueRegs(regT1), regT2, true, m_codeBlock->ecmaMode(), direct ? Direct : NotDirect);
+    gen.generateFastPath(*this);
+    addSlowCase(gen.slowPathJump());
+    m_putByIds.append(gen);
}
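Likewise, the deleted store sequence above suggests what JITPutByIdGenerator::generateFastPath emits on 64-bit: the same structure check as the get case, then a butterfly load into the scratch register and a patchable store. A sketch under those assumptions, with illustrative names:

    // Butterfly pointer goes into the scratch GPR supplied to the constructor.
    m_propertyStorageLoad = jit.convertibleLoadPtr(
        MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), m_scratch);
    // Patchable store of the boxed value; offset starts at patchPutByIdDefaultOffset.
    jit.store64WithAddressOffsetPatch(
        valueGPR, MacroAssembler::Address(m_scratch, patchPutByIdDefaultOffset));
    m_done = jit.label();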
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int baseVReg = currentInstruction[1].u.operand;
const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
- unsigned direct = currentInstruction[8].u.operand;
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
Label coldPathBegin(this);
- V_JITOperation_ESsiJJI optimizedCall;
- if (m_codeBlock->isStrictMode()) {
- if (direct)
- optimizedCall = operationPutByIdDirectStrictOptimize;
- else
- optimizedCall = operationPutByIdStrictOptimize;
- } else {
- if (direct)
- optimizedCall = operationPutByIdDirectNonStrictOptimize;
- else
- optimizedCall = operationPutByIdNonStrictOptimize;
- }
-
- StructureStubInfo* stubInfo = m_codeBlock->addStubInfo();
-    Call call = callOperation(optimizedCall, stubInfo, regT1, regT0, ident->impl());
-
-    // Track the location of the call; this will be used to recover patch information.
-    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(coldPathBegin, call, stubInfo);
+    JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];
+    Call call = callOperation(
+        gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, ident->impl());
+    gen.reportSlowPathCall(coldPathBegin, call);
}
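The four-way branch deleted above now lives behind gen.slowPathFunction(). Presumably it reduces to a lookup on the ECMAMode and PutKind captured at construction, along these lines (a reconstruction from the deleted code, not quoted from the patch; it relies on JSC's existing operation functions):

    V_JITOperation_ESsiJJI JITPutByIdGenerator::slowPathFunction()
    {
        // Pick the optimizing slow path that matches how this put was compiled.
        if (m_ecmaMode == StrictMode) {
            if (m_putKind == Direct)
                return operationPutByIdDirectStrictOptimize;
            return operationPutByIdStrictOptimize;
        }
        if (m_putKind == Direct)
            return operationPutByIdDirectNonStrictOptimize;
        return operationPutByIdNonStrictOptimize;
    }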
// Compile a store into an object's property storage. May overwrite the
emitLoad(base, regT1, regT0);
emitJumpSlowCaseIfNotJSCell(base, regT1);
- compileGetByIdHotPath(ident);
- emitValueProfilingSite(regT4);
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
-}
-void JIT::compileGetByIdHotPath(const Identifier* ident)
-{
if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
emitArrayProfilingSiteForBytecodeIndex(regT2, regT3, m_bytecodeOffset);
}
- DataLabelPtr structureToCompare;
- PatchableJump structureCheck = patchableBranchPtrWithPatch(
- NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare,
- TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
-
- addSlowCase(structureCheck);
-
- ConvertibleLoadLabel propertyStorageLoad =
- convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
-
- DataLabelCompact loadWithPatchTag = load32WithCompactAddressOffsetPatch(
- Address(regT0, patchGetByIdDefaultOffset), regT1);
- DataLabelCompact loadWithPatchPayload = load32WithCompactAddressOffsetPatch(
- Address(regT0, patchGetByIdDefaultOffset), regT0);
-
- Label done = label();
-
- m_propertyAccessCompilationInfo.append(
- PropertyStubCompilationInfo(
- PropertyStubGetById, m_bytecodeOffset, structureToCompare, structureCheck,
- propertyStorageLoad, loadWithPatchPayload, loadWithPatchTag, done));
+ JITGetByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet(),
+ JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), true);
+ gen.generateFastPath(*this);
+ addSlowCase(gen.slowPathJump());
+ m_getByIds.append(gen);
+
+ emitValueProfilingSite(regT4);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
}
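Note how the 32-bit code builds its JSValueRegs: the value occupies a tag/payload pair (tag GPR first), while the base, being a known cell, is payload-only. On 64-bit a single GPR carries the whole boxed value. A side-by-side sketch; the payloadOnly spelling on the 64-bit build is an assumption:

    #if USE(JSVALUE64)
        JSValueRegs value(regT0);        // one GPR holds the boxed JSValue
    #else
        JSValueRegs value(regT1, regT0); // tag in regT1, payload in regT0
    #endif
        JSValueRegs base = JSValueRegs::payloadOnly(regT0); // cell pointer, no tag needed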
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
+ JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];
+
Label coldPathBegin = label();
-    StructureStubInfo* stubInfo = m_codeBlock->addStubInfo();
-
-    Call call = callOperation(WithProfile, operationGetByIdOptimize, resultVReg, stubInfo, regT1, regT0, ident->impl());
-
-    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(coldPathBegin, call, stubInfo);
+    Call call = callOperation(WithProfile, operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT1, regT0, ident->impl());
+    gen.reportSlowPathCall(coldPathBegin, call);
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
int base = currentInstruction[1].u.operand;
int value = currentInstruction[3].u.operand;
+ int direct = currentInstruction[8].u.operand;
emitLoad2(base, regT1, regT0, value, regT3, regT2);
emitJumpSlowCaseIfNotJSCell(base, regT1);
- DataLabelPtr structureToCompare;
- PatchableJump structureCheck = patchableBranchPtrWithPatch(
- NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare,
- TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
-
- addSlowCase(structureCheck);
-    emitWriteBarrier(regT0, regT2, regT1, regT2, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
-
-    ConvertibleLoadLabel propertyStorageLoad =
-        convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT1);
-
-    // Payload.
-    DataLabel32 storeWithPatch1 =
-        store32WithAddressOffsetPatch(regT2, Address(regT1, patchPutByIdDefaultOffset));
-    // Tag.
-    DataLabel32 storeWithPatch2 =
-        store32WithAddressOffsetPatch(regT3, Address(regT1, patchPutByIdDefaultOffset));
-
-    Label done = label();
-
-    m_propertyAccessCompilationInfo.append(
-        PropertyStubCompilationInfo(
-            PropertyStubPutById, m_bytecodeOffset, structureToCompare, structureCheck,
-            propertyStorageLoad, storeWithPatch1, storeWithPatch2, done));
+    emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
+
+    JITPutByIdGenerator gen(
+        m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet(),
+        JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), regT1, true,
+        m_codeBlock->ecmaMode(), direct ? Direct : NotDirect);
+    gen.generateFastPath(*this);
+    addSlowCase(gen.slowPathJump());
+    m_putByIds.append(gen);
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int base = currentInstruction[1].u.operand;
const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
- int direct = currentInstruction[8].u.operand;
linkSlowCaseIfNotJSCell(iter, base);
linkSlowCase(iter);
Label coldPathBegin(this);
- V_JITOperation_ESsiJJI optimizedCall;
- if (m_codeBlock->isStrictMode()) {
- if (direct)
- optimizedCall = operationPutByIdDirectStrictOptimize;
- else
- optimizedCall = operationPutByIdStrictOptimize;
- } else {
- if (direct)
- optimizedCall = operationPutByIdDirectNonStrictOptimize;
- else
- optimizedCall = operationPutByIdNonStrictOptimize;
- }
-    StructureStubInfo* stubInfo = m_codeBlock->addStubInfo();
-
-    Call call = callOperation(optimizedCall, stubInfo, regT3, regT2, regT1, regT0, ident->impl());
-
-    // Track the location of the call; this will be used to recover patch information.
-    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(coldPathBegin, call, stubInfo);
+    JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];
+    Call call = callOperation(
+        gen.slowPathFunction(), gen.stubInfo(), regT3, regT2, regT1, regT0, ident->impl());
+    gen.reportSlowPathCall(coldPathBegin, call);
}
// Compile a store into an object's property storage. May overwrite base.
setBit(GPRInfo::toIndex(reg));
}
+ void set(JSValueRegs regs)
+ {
+ if (regs.tagGPR() != InvalidGPRReg)
+ set(regs.tagGPR());
+ set(regs.payloadGPR());
+ }
+
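This overload lets callers mark both halves of an in-flight JSValue as live in one step; tagGPR() returns InvalidGPRReg when there is no separate tag register (the one-GPR 64-bit case), so only the payload register is set there. A usage sketch, with illustrative register choices:

    RegisterSet usedRegisters;
    usedRegisters.set(JSValueRegs(regT1, regT0)); // 32-bit: marks tag and payload GPRs
    usedRegisters.set(JSValueRegs(regT0));        // 64-bit: tag is InvalidGPRReg, payload only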
void setGPRByIndex(unsigned index)
{
ASSERT(index < GPRInfo::numberOfRegisters);