Refactor FTL sub snippet code to support general binary op snippets.
authormark.lam@apple.com <mark.lam@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 1 Dec 2015 19:31:58 +0000 (19:31 +0000)
committermark.lam@apple.com <mark.lam@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 1 Dec 2015 19:31:58 +0000 (19:31 +0000)
https://bugs.webkit.org/show_bug.cgi?id=151706

Reviewed by Geoffrey Garen.

* CMakeLists.txt:
* JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
* JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
* JavaScriptCore.xcodeproj/project.pbxproj:

* ftl/FTLCompile.cpp:
- Moved the BinarySnippetRegisterContext to FTLCompileBinaryOp.cpp verbatim.
- Generalize generateArithSubICFastPath() to generateBinaryOpICFastPath().
  It now uses snippet specific helpers in FTLCompileBinaryOp.cpp to generate
  the fast paths.

* ftl/FTLCompileBinaryOp.cpp: Added.
(JSC::FTL::BinarySnippetRegisterContext::BinarySnippetRegisterContext):
(JSC::FTL::BinarySnippetRegisterContext::initializeRegisters):
(JSC::FTL::BinarySnippetRegisterContext::restoreRegisters):
- Moved here without change from FTLCompile.cpp.
(JSC::FTL::generateArithSubFastPath):
* ftl/FTLCompileBinaryOp.h: Added.

* ftl/FTLInlineCacheDescriptor.h:
(JSC::FTL::BinaryOpDescriptor::nodeType):
(JSC::FTL::BinaryOpDescriptor::size):
(JSC::FTL::BinaryOpDescriptor::name):
(JSC::FTL::BinaryOpDescriptor::fastPathICName):
(JSC::FTL::BinaryOpDescriptor::slowPathFunction):
(JSC::FTL::BinaryOpDescriptor::leftOperand):
(JSC::FTL::BinaryOpDescriptor::rightOperand):
(JSC::FTL::BinaryOpDescriptor::BinaryOpDescriptor):
(JSC::FTL::ArithSubDescriptor::ArithSubDescriptor): Deleted.
(JSC::FTL::ArithSubDescriptor::leftType): Deleted.
(JSC::FTL::ArithSubDescriptor::rightType): Deleted.
- Refactor ArithSubDescriptor into BinaryOpDescriptor, and re-add a sub-class
  ArithSubDescriptor as a specialization of BinaryOpDescriptor.

* ftl/FTLInlineCacheDescriptorInlines.h: Added.
(JSC::FTL::ArithSubDescriptor::ArithSubDescriptor):
(JSC::FTL::ArithSubDescriptor::icSize):

* ftl/FTLLowerDFGToLLVM.cpp:
(JSC::FTL::DFG::LowerDFGToLLVM::compileArithAddOrSub):
* ftl/FTLOSRExit.cpp:
(JSC::FTL::OSRExit::willArriveAtExitFromIndirectExceptionCheck):
(JSC::FTL::OSRExit::willArriveAtOSRExitFromCallOperation):
* ftl/FTLOSRExit.h:
* ftl/FTLState.h:

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@192896 268f45cc-cd09-0410-ab3c-d52691b4dbfc

14 files changed:
Source/JavaScriptCore/CMakeLists.txt
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj
Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters
Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
Source/JavaScriptCore/ftl/FTLCompile.cpp
Source/JavaScriptCore/ftl/FTLCompileBinaryOp.cpp [new file with mode: 0644]
Source/JavaScriptCore/ftl/FTLCompileBinaryOp.h [new file with mode: 0644]
Source/JavaScriptCore/ftl/FTLInlineCacheDescriptor.h
Source/JavaScriptCore/ftl/FTLInlineCacheDescriptorInlines.h [new file with mode: 0644]
Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp
Source/JavaScriptCore/ftl/FTLOSRExit.cpp
Source/JavaScriptCore/ftl/FTLOSRExit.h
Source/JavaScriptCore/ftl/FTLState.h

index 1ea9243..cae3cd2 100644 (file)
@@ -997,6 +997,7 @@ if (ENABLE_FTL_JIT)
         ftl/FTLCapabilities.cpp
         ftl/FTLCommonValues.cpp
         ftl/FTLCompile.cpp
+        ftl/FTLCompileBinaryOp.cpp
         ftl/FTLDWARFDebugLineInfo.cpp
         ftl/FTLDWARFRegister.cpp
         ftl/FTLDataSection.cpp
index ab49f9a..a397c30 100644 (file)
@@ -1,3 +1,56 @@
+2015-12-01  Mark Lam  <mark.lam@apple.com>
+
+        Refactor FTL sub snippet code to support general binary op snippets.
+        https://bugs.webkit.org/show_bug.cgi?id=151706
+
+        Reviewed by Geoffrey Garen.
+
+        * CMakeLists.txt:
+        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj:
+        * JavaScriptCore.vcxproj/JavaScriptCore.vcxproj.filters:
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+
+        * ftl/FTLCompile.cpp:
+        - Moved the BinarySnippetRegisterContext to FTLCompileBinaryOp.cpp verbatim.
+        - Generalize generateArithSubICFastPath() to generateBinaryOpICFastPath().
+          It now uses snippet specific helpers in FTLCompileBinaryOp.cpp to generate
+          the fast paths.
+
+        * ftl/FTLCompileBinaryOp.cpp: Added.
+        (JSC::FTL::BinarySnippetRegisterContext::BinarySnippetRegisterContext):
+        (JSC::FTL::BinarySnippetRegisterContext::initializeRegisters):
+        (JSC::FTL::BinarySnippetRegisterContext::restoreRegisters):
+        - Moved here without change from FTLCompile.cpp.
+        (JSC::FTL::generateArithSubFastPath):
+        * ftl/FTLCompileBinaryOp.h: Added.
+
+        * ftl/FTLInlineCacheDescriptor.h:
+        (JSC::FTL::BinaryOpDescriptor::nodeType):
+        (JSC::FTL::BinaryOpDescriptor::size):
+        (JSC::FTL::BinaryOpDescriptor::name):
+        (JSC::FTL::BinaryOpDescriptor::fastPathICName):
+        (JSC::FTL::BinaryOpDescriptor::slowPathFunction):
+        (JSC::FTL::BinaryOpDescriptor::leftOperand):
+        (JSC::FTL::BinaryOpDescriptor::rightOperand):
+        (JSC::FTL::BinaryOpDescriptor::BinaryOpDescriptor):
+        (JSC::FTL::ArithSubDescriptor::ArithSubDescriptor): Deleted.
+        (JSC::FTL::ArithSubDescriptor::leftType): Deleted.
+        (JSC::FTL::ArithSubDescriptor::rightType): Deleted.
+        - Refactor ArithSubDescriptor into BinaryOpDescriptor, and re-add a sub-class
+          ArithSubDescriptor as a specialization of BinaryOpDescriptor.
+
+        * ftl/FTLInlineCacheDescriptorInlines.h: Added.
+        (JSC::FTL::ArithSubDescriptor::ArithSubDescriptor):
+        (JSC::FTL::ArithSubDescriptor::icSize):
+
+        * ftl/FTLLowerDFGToLLVM.cpp:
+        (JSC::FTL::DFG::LowerDFGToLLVM::compileArithAddOrSub):
+        * ftl/FTLOSRExit.cpp:
+        (JSC::FTL::OSRExit::willArriveAtExitFromIndirectExceptionCheck):
+        (JSC::FTL::OSRExit::willArriveAtOSRExitFromCallOperation):
+        * ftl/FTLOSRExit.h:
+        * ftl/FTLState.h:
+
 2015-12-01  Carlos Garcia Campos  <cgarcia@igalia.com>
 
         Unreviewed, rolling out r192876.
index 4950b7d..9a206a0 100644 (file)
     <ClCompile Include="..\ftl\FTLCapabilities.cpp" />
     <ClCompile Include="..\ftl\FTLCommonValues.cpp" />
     <ClCompile Include="..\ftl\FTLCompile.cpp" />
+    <ClCompile Include="..\ftl\FTLCompileBinaryOp.cpp" />
     <ClCompile Include="..\ftl\FTLDataSection.cpp" />
     <ClCompile Include="..\ftl\FTLDWARFDebugLineInfo.cpp" />
     <ClCompile Include="..\ftl\FTLDWARFRegister.cpp" />
     <ClInclude Include="..\ftl\FTLCapabilities.h" />
     <ClInclude Include="..\ftl\FTLCommonValues.h" />
     <ClInclude Include="..\ftl\FTLCompile.h" />
+    <ClInclude Include="..\ftl\FTLCompileBinaryOp.h" />
     <ClInclude Include="..\ftl\FTLDataSection.h" />
     <ClInclude Include="..\ftl\FTLDWARFDebugLineInfo.h" />
     <ClInclude Include="..\ftl\FTLDWARFRegister.h" />
     <ClInclude Include="..\ftl\FTLForOSREntryJITCode.h" />
     <ClInclude Include="..\ftl\FTLGeneratedFunction.h" />
     <ClInclude Include="..\ftl\FTLInlineCacheDescriptor.h" />
+    <ClInclude Include="..\ftl\FTLInlineCacheDescriptorInlines.h" />
     <ClInclude Include="..\ftl\FTLInlineCacheSize.h" />
     <ClInclude Include="..\ftl\FTLIntrinsicRepository.h" />
     <ClInclude Include="..\ftl\FTLJITCode.h" />
index f391f22..951c3c5 100644 (file)
     <ClCompile Include="..\ftl\FTLCompile.cpp">
       <Filter>ftl</Filter>
     </ClCompile>
+    <ClCompile Include="..\ftl\FTLCompileBinaryOp.cpp">
+      <Filter>ftl</Filter>
+    </ClCompile>
     <ClCompile Include="..\ftl\FTLDataSection.cpp">
       <Filter>ftl</Filter>
     </ClCompile>
     <ClInclude Include="..\ftl\FTLCompile.h">
       <Filter>ftl</Filter>
     </ClInclude>
+    <ClInclude Include="..\ftl\FTLCompileBinaryOp.h">
+      <Filter>ftl</Filter>
+    </ClInclude>
     <ClInclude Include="..\ftl\FTLDataSection.h">
       <Filter>ftl</Filter>
     </ClInclude>
     <ClInclude Include="..\ftl\FTLInlineCacheDescriptor.h">
       <Filter>ftl</Filter>
     </ClInclude>
+    <ClInclude Include="..\ftl\FTLInlineCacheDescriptorInlines.h">
+      <Filter>ftl</Filter>
+    </ClInclude>
     <ClInclude Include="..\ftl\FTLInlineCacheSize.h">
       <Filter>ftl</Filter>
     </ClInclude>
index b385c9c..68b9502 100644 (file)
                FE187A0D1C030D5C0038BBCA /* JITDivGenerator.h in Headers */ = {isa = PBXBuildFile; fileRef = FE187A0B1C0229230038BBCA /* JITDivGenerator.h */; settings = {ASSET_TAGS = (); }; };
                FE187A0E1C030D640038BBCA /* JITDivGenerator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = FE187A0A1C0229230038BBCA /* JITDivGenerator.cpp */; settings = {ASSET_TAGS = (); }; };
                FE187A0F1C030D6C0038BBCA /* SnippetOperand.h in Headers */ = {isa = PBXBuildFile; fileRef = FE187A0C1C02EBA70038BBCA /* SnippetOperand.h */; settings = {ASSET_TAGS = (); }; };
+               FE187A181C0E13DD0038BBCA /* FTLInlineCacheDescriptorInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = FE187A171C0E13C60038BBCA /* FTLInlineCacheDescriptorInlines.h */; settings = {ASSET_TAGS = (); }; };
+               FE187A191C0E13E30038BBCA /* FTLCompileBinaryOp.cpp in Sources */ = {isa = PBXBuildFile; fileRef = FE187A151C0E13C60038BBCA /* FTLCompileBinaryOp.cpp */; settings = {ASSET_TAGS = (); }; };
+               FE187A1A1C0E13E60038BBCA /* FTLCompileBinaryOp.h in Headers */ = {isa = PBXBuildFile; fileRef = FE187A161C0E13C60038BBCA /* FTLCompileBinaryOp.h */; settings = {ASSET_TAGS = (); }; };
                FE1C0FFD1B193E9800B53FCA /* Exception.h in Headers */ = {isa = PBXBuildFile; fileRef = FE1C0FFC1B193E9800B53FCA /* Exception.h */; settings = {ATTRIBUTES = (Private, ); }; };
                FE1C0FFF1B194FD100B53FCA /* Exception.cpp in Sources */ = {isa = PBXBuildFile; fileRef = FE1C0FFE1B194FD100B53FCA /* Exception.cpp */; };
                FE20CE9D15F04A9500DF3430 /* LLIntCLoop.cpp in Sources */ = {isa = PBXBuildFile; fileRef = FE20CE9B15F04A9500DF3430 /* LLIntCLoop.cpp */; };
                FE187A0A1C0229230038BBCA /* JITDivGenerator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITDivGenerator.cpp; sourceTree = "<group>"; };
                FE187A0B1C0229230038BBCA /* JITDivGenerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITDivGenerator.h; sourceTree = "<group>"; };
                FE187A0C1C02EBA70038BBCA /* SnippetOperand.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SnippetOperand.h; sourceTree = "<group>"; };
+               FE187A151C0E13C60038BBCA /* FTLCompileBinaryOp.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = FTLCompileBinaryOp.cpp; path = ftl/FTLCompileBinaryOp.cpp; sourceTree = "<group>"; };
+               FE187A161C0E13C60038BBCA /* FTLCompileBinaryOp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = FTLCompileBinaryOp.h; path = ftl/FTLCompileBinaryOp.h; sourceTree = "<group>"; };
+               FE187A171C0E13C60038BBCA /* FTLInlineCacheDescriptorInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = FTLInlineCacheDescriptorInlines.h; path = ftl/FTLInlineCacheDescriptorInlines.h; sourceTree = "<group>"; };
                FE1C0FFC1B193E9800B53FCA /* Exception.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Exception.h; sourceTree = "<group>"; };
                FE1C0FFE1B194FD100B53FCA /* Exception.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Exception.cpp; sourceTree = "<group>"; };
                FE20CE9B15F04A9500DF3430 /* LLIntCLoop.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = LLIntCLoop.cpp; path = llint/LLIntCLoop.cpp; sourceTree = "<group>"; };
                                0FEA0A211709606900BB722C /* FTLCommonValues.h */,
                                0FEA0A00170513DB00BB722C /* FTLCompile.cpp */,
                                0FEA0A01170513DB00BB722C /* FTLCompile.h */,
+                               FE187A151C0E13C60038BBCA /* FTLCompileBinaryOp.cpp */,
+                               FE187A161C0E13C60038BBCA /* FTLCompileBinaryOp.h */,
                                0FE95F7718B5694700B531FB /* FTLDataSection.cpp */,
                                0FE95F7818B5694700B531FB /* FTLDataSection.h */,
                                2AC922B918A16182003CE0FB /* FTLDWARFDebugLineInfo.cpp */,
                                0FD8A31617D51F2200CA2C40 /* FTLForOSREntryJITCode.h */,
                                A78A977C179738D5009DF744 /* FTLGeneratedFunction.h */,
                                0F25F1A7181635F300522F39 /* FTLInlineCacheDescriptor.h */,
+                               FE187A171C0E13C60038BBCA /* FTLInlineCacheDescriptorInlines.h */,
                                0F25F1A8181635F300522F39 /* FTLInlineCacheSize.cpp */,
                                0F25F1A9181635F300522F39 /* FTLInlineCacheSize.h */,
                                0FEA0A261709623B00BB722C /* FTLIntrinsicRepository.cpp */,
                        files = (
                                0FFA549816B8835300B3A982 /* A64DOpcode.h in Headers */,
                                0F1FE51C1922A3BC006987C5 /* AbortReason.h in Headers */,
+                               FE187A1A1C0E13E60038BBCA /* FTLCompileBinaryOp.h in Headers */,
                                860161E30F3A83C100F84710 /* AbstractMacroAssembler.h in Headers */,
                                0F55F0F514D1063C00AC7649 /* AbstractPC.h in Headers */,
                                0FEC856E1BDACDC70080FF74 /* AirAllocateStack.h in Headers */,
                                C2C8D02D14A3C6E000578E65 /* CopiedSpaceInlines.h in Headers */,
                                0F7C11AD1BC3862C00C74CDB /* CopyBarrier.h in Headers */,
                                0F5A52D017ADD717008ECB2D /* CopyToken.h in Headers */,
+                               FE187A181C0E13DD0038BBCA /* FTLInlineCacheDescriptorInlines.h in Headers */,
                                C2239D1816262BDD005AC5FD /* CopyVisitor.h in Headers */,
                                C2239D1916262BDD005AC5FD /* CopyVisitorInlines.h in Headers */,
                                C218D1401655CFD50062BB81 /* CopyWorkList.h in Headers */,
                                0FB17662196B8F9E0091052A /* DFGPureValue.cpp in Sources */,
                                0F3A1BF91A9ECB7D000DE01A /* DFGPutStackSinkingPhase.cpp in Sources */,
                                0F2FCCFB18A60070001A27F8 /* DFGSafepoint.cpp in Sources */,
+                               FE187A191C0E13E30038BBCA /* FTLCompileBinaryOp.cpp in Sources */,
                                86EC9DD21328DF82002B2AD7 /* DFGSpeculativeJIT.cpp in Sources */,
                                86880F1F14328BB900B08D42 /* DFGSpeculativeJIT32_64.cpp in Sources */,
                                86880F4D14353B2100B08D42 /* DFGSpeculativeJIT64.cpp in Sources */,
index 179c569..948b587 100644 (file)
 #include "DFGOperations.h"
 #include "DataView.h"
 #include "Disassembler.h"
+#include "FTLCompileBinaryOp.h"
 #include "FTLExceptionHandlerManager.h"
 #include "FTLExitThunkGenerator.h"
+#include "FTLInlineCacheDescriptorInlines.h"
 #include "FTLInlineCacheSize.h"
 #include "FTLJITCode.h"
 #include "FTLThunks.h"
 #include "FTLUnwindInfo.h"
-#include "JITSubGenerator.h"
 #include "LLVMAPI.h"
 #include "LinkBuffer.h"
 #include "ScratchRegisterAllocator.h"
@@ -307,125 +308,12 @@ static void generateCheckInICFastPath(
     }
 }
 
-class BinarySnippetRegisterContext {
-    // The purpose of this class is to shuffle registers to get them into the state
-    // that baseline code expects so that we can use the baseline snippet generators i.e.
-    //    1. Ensure that the inputs and output are not in reserved registers (which
-    //       include the tag registers). The snippet will use these reserved registers.
-    //       Hence, we need to put the inputs and output in other scratch registers.
-    //    2. Tag registers are loaded with the expected values.
-    //
-    // When the snippet is done:
-    //    1. If we had re-assigned the result register to a scratch, we need to copy the
-    //       result back from the scratch.
-    //    2. Restore the input and tag registers to the values that LLVM put there originally.
-    //       That is unless when one of them is also the result register. In that case, we
-    //       don't want to trash the result, and hence, should not restore into it.
-
-public:
-    BinarySnippetRegisterContext(ScratchRegisterAllocator& allocator, GPRReg& result, GPRReg& left, GPRReg& right)
-        : m_allocator(allocator)
-        , m_result(result)
-        , m_left(left)
-        , m_right(right)
-        , m_origResult(result)
-        , m_origLeft(left)
-        , m_origRight(right)
-    {
-        m_allocator.lock(m_result);
-        m_allocator.lock(m_left);
-        m_allocator.lock(m_right);
-
-        RegisterSet inputAndOutputRegisters = RegisterSet(m_left, m_right, m_result);
-        RegisterSet reservedRegisters;
-        for (GPRReg reg : GPRInfo::reservedRegisters())
-            reservedRegisters.set(reg);
-
-        if (reservedRegisters.get(m_left))
-            m_left = m_allocator.allocateScratchGPR();
-        if (reservedRegisters.get(m_right)) {
-            if (m_origRight == m_origLeft)
-                m_right = m_left;
-            else
-                m_right = m_allocator.allocateScratchGPR();
-        }
-        if (reservedRegisters.get(m_result)) {
-            if (m_origResult == m_origLeft)
-                m_result = m_left;
-            else if (m_origResult == m_origRight)
-                m_result = m_right;
-            else
-                m_result = m_allocator.allocateScratchGPR();
-        }
-
-        if (!inputAndOutputRegisters.get(GPRInfo::tagMaskRegister))
-            m_savedTagMaskRegister = m_allocator.allocateScratchGPR();
-        if (!inputAndOutputRegisters.get(GPRInfo::tagTypeNumberRegister))
-            m_savedTagTypeNumberRegister = m_allocator.allocateScratchGPR();
-    }
-
-    void initializeRegisters(CCallHelpers& jit)
-    {
-        if (m_left != m_origLeft)
-            jit.move(m_origLeft, m_left);
-        if (m_right != m_origRight && m_origRight != m_origLeft)
-            jit.move(m_origRight, m_right);
-
-        if (m_savedTagMaskRegister != InvalidGPRReg)
-            jit.move(GPRInfo::tagMaskRegister, m_savedTagMaskRegister);
-        if (m_savedTagTypeNumberRegister != InvalidGPRReg)
-            jit.move(GPRInfo::tagTypeNumberRegister, m_savedTagTypeNumberRegister);
-
-        jit.emitMaterializeTagCheckRegisters();
-    }
-
-    void restoreRegisters(CCallHelpers& jit)
-    {
-        if (m_origResult != m_result)
-            jit.move(m_result, m_origResult);
-        if (m_origLeft != m_left && m_origLeft != m_origResult)
-            jit.move(m_left, m_origLeft);
-        if (m_origRight != m_right && m_origRight != m_origResult && m_origRight != m_origLeft)
-            jit.move(m_right, m_origRight);
-
-        // We are guaranteed that the tag registers are not the same as the original input
-        // or output registers. Otherwise, we would not have allocated a scratch for them.
-        // Hence, we don't need to need to check for overlap like we do for the input registers.
-        if (m_savedTagMaskRegister != InvalidGPRReg) {
-            ASSERT(GPRInfo::tagMaskRegister != m_origLeft);
-            ASSERT(GPRInfo::tagMaskRegister != m_origRight);
-            ASSERT(GPRInfo::tagMaskRegister != m_origResult);
-            jit.move(m_savedTagMaskRegister, GPRInfo::tagMaskRegister);
-        }
-        if (m_savedTagTypeNumberRegister != InvalidGPRReg) {
-            ASSERT(GPRInfo::tagTypeNumberRegister != m_origLeft);
-            ASSERT(GPRInfo::tagTypeNumberRegister != m_origRight);
-            ASSERT(GPRInfo::tagTypeNumberRegister != m_origResult);
-            jit.move(m_savedTagTypeNumberRegister, GPRInfo::tagTypeNumberRegister);
-        }
-    }
-
-private:
-    ScratchRegisterAllocator& m_allocator;
-
-    GPRReg& m_result;
-    GPRReg& m_left;
-    GPRReg& m_right;
-
-    GPRReg m_origResult;
-    GPRReg m_origLeft;
-    GPRReg m_origRight;
-
-    GPRReg m_savedTagMaskRegister { InvalidGPRReg };
-    GPRReg m_savedTagTypeNumberRegister { InvalidGPRReg };
-};
-
-static void generateArithSubICFastPath(
+static void generateBinaryOpICFastPath(
     State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
-    StackMaps::RecordMap& recordMap, ArithSubDescriptor& ic)
+    StackMaps::RecordMap& recordMap, BinaryOpDescriptor& ic)
 {
     VM& vm = state.graph.m_vm;
-    size_t sizeOfIC = sizeOfArithSub();
+    size_t sizeOfIC = ic.size();
 
     StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
     if (iter == recordMap.end())
@@ -440,46 +328,25 @@ static void generateArithSubICFastPath(
 
         CCallHelpers fastPathJIT(&vm, codeBlock);
 
-        SnippetOperand leftOperand(ic.leftType());
-        SnippetOperand rightOperand(ic.rightType());
-
         GPRReg result = record.locations[0].directGPR();
         GPRReg left = record.locations[1].directGPR();
         GPRReg right = record.locations[2].directGPR();
-
         RegisterSet usedRegisters = usedRegistersFor(record);
-        ScratchRegisterAllocator allocator(usedRegisters);
 
-        BinarySnippetRegisterContext context(allocator, result, left, right);
+        CCallHelpers::Jump done;
+        CCallHelpers::Jump slowPathStart;
 
-        GPRReg scratchGPR = allocator.allocateScratchGPR();
-        FPRReg leftFPR = allocator.allocateScratchFPR();
-        FPRReg rightFPR = allocator.allocateScratchFPR();
-        FPRReg scratchFPR = InvalidFPRReg;
-
-        JITSubGenerator gen(leftOperand, rightOperand, JSValueRegs(result), JSValueRegs(left), JSValueRegs(right), leftFPR, rightFPR, scratchGPR, scratchFPR);
-
-        auto numberOfBytesUsedToPreserveReusedRegisters =
-            allocator.preserveReusedRegistersByPushing(fastPathJIT, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
-
-        context.initializeRegisters(fastPathJIT);
-        gen.generateFastPath(fastPathJIT);
-
-        ASSERT(gen.didEmitFastPath());
-        gen.endJumpList().link(&fastPathJIT);
-        context.restoreRegisters(fastPathJIT);
-        allocator.restoreReusedRegistersByPopping(fastPathJIT, numberOfBytesUsedToPreserveReusedRegisters,
-            ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
-        CCallHelpers::Jump done = fastPathJIT.jump();
-
-        gen.slowPathJumpList().link(&fastPathJIT);
-        context.restoreRegisters(fastPathJIT);
-        allocator.restoreReusedRegistersByPopping(fastPathJIT, numberOfBytesUsedToPreserveReusedRegisters,
-            ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
-        CCallHelpers::Jump slowPathStart = fastPathJIT.jump();
+        switch (ic.nodeType()) {
+        case ArithSub:
+            generateArithSubFastPath(ic, fastPathJIT, result, left, right, usedRegisters, done, slowPathStart);
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+        }
 
         char* startOfIC = bitwise_cast<char*>(generatedFunction) + record.instructionOffset;
-        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, "ArithSub inline cache fast path", [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
+        const char* fastPathICName = ic.fastPathICName();
+        generateInlineIfPossibleOutOfLineIfNot(state, vm, codeBlock, fastPathJIT, startOfIC, sizeOfIC, fastPathICName, [&] (LinkBuffer& linkBuffer, CCallHelpers&, bool) {
             linkBuffer.link(done, CodeLocationLabel(startOfIC + sizeOfIC));
             state.finalizer->sideCodeLinkBuffer->link(ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));
             
@@ -650,7 +517,7 @@ static void fixFunctionBasedOnStackMaps(
                 exit.m_exceptionHandlerCallSiteIndex = callSiteIndex;
 
                 OSRExit* callOperationExit = nullptr;
-                if (exitDescriptor.m_exceptionType == ExceptionType::SubGenerator) {
+                if (exitDescriptor.m_exceptionType == ExceptionType::BinaryOpGenerator) {
                     exceptionHandlerManager.addNewCallOperationExit(iter->value[j].index, state.jitCode->osrExit.size() - 1);
                     callOperationExit = &exit;
                 } else
@@ -694,7 +561,7 @@ static void fixFunctionBasedOnStackMaps(
                     GPRReg base = record.locations[1].directGPR();
                     if (base == result)
                         callOperationExit->registersToPreserveForCallThatMightThrow.set(base);
-                } else if (exitDescriptor.m_exceptionType == ExceptionType::SubGenerator) {
+                } else if (exitDescriptor.m_exceptionType == ExceptionType::BinaryOpGenerator) {
                     GPRReg result = record.locations[0].directGPR();
                     GPRReg left = record.locations[1].directGPR();
                     GPRReg right = record.locations[2].directGPR();
@@ -755,7 +622,7 @@ static void fixFunctionBasedOnStackMaps(
     if (!state.getByIds.isEmpty()
         || !state.putByIds.isEmpty()
         || !state.checkIns.isEmpty()
-        || !state.arithSubs.isEmpty()
+        || !state.binaryOps.isEmpty()
         || !state.lazySlowPaths.isEmpty()) {
         CCallHelpers slowPathJIT(&vm, codeBlock);
         
@@ -903,17 +770,17 @@ static void fixFunctionBasedOnStackMaps(
             }
         }
 
-        for (size_t i = state.arithSubs.size(); i--;) {
-            ArithSubDescriptor& arithSub = state.arithSubs[i];
+        for (size_t i = state.binaryOps.size(); i--;) {
+            BinaryOpDescriptor& binaryOp = state.binaryOps[i];
             
             if (verboseCompilationEnabled())
-                dataLog("Handling ArithSub stackmap #", arithSub.stackmapID(), "\n");
+                dataLog("Handling ", binaryOp.name(), " stackmap #", binaryOp.stackmapID(), "\n");
             
-            auto iter = recordMap.find(arithSub.stackmapID());
+            auto iter = recordMap.find(binaryOp.stackmapID());
             if (iter == recordMap.end())
                 continue; // It was optimized out.
             
-            CodeOrigin codeOrigin = arithSub.codeOrigin();
+            CodeOrigin codeOrigin = binaryOp.codeOrigin();
             for (unsigned i = 0; i < iter->value.size(); ++i) {
                 StackMaps::Record& record = iter->value[i].record;
                 RegisterSet usedRegisters = usedRegistersFor(record);
@@ -922,7 +789,7 @@ static void fixFunctionBasedOnStackMaps(
                 GPRReg left = record.locations[1].directGPR();
                 GPRReg right = record.locations[2].directGPR();
 
-                arithSub.m_slowPathStarts.append(slowPathJIT.label());
+                binaryOp.m_slowPathStarts.append(slowPathJIT.label());
                 bool addedUniqueExceptionJump = addNewExceptionJumpIfNecessary(iter->value[i].index);
                 if (result == left || result == right) {
                     // This situation has a really interesting register preservation story.
@@ -932,9 +799,9 @@ static void fixFunctionBasedOnStackMaps(
                 }
 
                 callOperation(state, usedRegisters, slowPathJIT, codeOrigin, addedUniqueExceptionJump ? &exceptionJumpsToLink.last().first : &exceptionTarget,
-                    operationValueSub, result, left, right).call();
+                    binaryOp.slowPathFunction(), result, left, right).call();
 
-                arithSub.m_slowPathDone.append(slowPathJIT.jump());
+                binaryOp.m_slowPathDone.append(slowPathJIT.jump());
             }
         }
 
@@ -1022,9 +889,9 @@ static void fixFunctionBasedOnStackMaps(
                 state, codeBlock, generatedFunction, recordMap, state.checkIns[i],
                 sizeOfIn()); 
         }
-        for (unsigned i = state.arithSubs.size(); i--;) {
-            ArithSubDescriptor& arithSub = state.arithSubs[i];
-            generateArithSubICFastPath(state, codeBlock, generatedFunction, recordMap, arithSub);
+        for (unsigned i = state.binaryOps.size(); i--;) {
+            BinaryOpDescriptor& binaryOp = state.binaryOps[i];
+            generateBinaryOpICFastPath(state, codeBlock, generatedFunction, recordMap, binaryOp);
         }
         for (unsigned i = state.lazySlowPaths.size(); i--;) {
             LazySlowPathDescriptor& lazySlowPath = state.lazySlowPaths[i];
diff --git a/Source/JavaScriptCore/ftl/FTLCompileBinaryOp.cpp b/Source/JavaScriptCore/ftl/FTLCompileBinaryOp.cpp
new file mode 100644 (file)
index 0000000..4f455ee
--- /dev/null
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "FTLCompileBinaryOp.h"
+
+#if ENABLE(FTL_JIT)
+
+#include "DFGNodeType.h"
+#include "FTLInlineCacheDescriptor.h"
+#include "GPRInfo.h"
+#include "JITSubGenerator.h"
+#include "ScratchRegisterAllocator.h"
+
+namespace JSC { namespace FTL {
+
+using namespace DFG;
+
+class BinarySnippetRegisterContext {
+    // The purpose of this class is to shuffle registers to get them into the state
+    // that baseline code expects so that we can use the baseline snippet generators i.e.
+    //    1. Ensure that the inputs and output are not in reserved registers (which
+    //       include the tag registers). The snippet will use these reserved registers.
+    //       Hence, we need to put the inputs and output in other scratch registers.
+    //    2. Tag registers are loaded with the expected values.
+    //
+    // When the snippet is done:
+    //    1. If we had re-assigned the result register to a scratch, we need to copy the
+    //       result back from the scratch.
+    //    2. Restore the input and tag registers to the values that LLVM put there originally.
+    //       That is, unless one of them is also the result register. In that case, we
+    //       don't want to trash the result, and hence, should not restore into it.
+
+public:
+    BinarySnippetRegisterContext(ScratchRegisterAllocator& allocator, GPRReg& result, GPRReg& left, GPRReg& right)
+        : m_allocator(allocator)
+        , m_result(result)
+        , m_left(left)
+        , m_right(right)
+        , m_origResult(result)
+        , m_origLeft(left)
+        , m_origRight(right)
+    {
+        m_allocator.lock(m_result);
+        m_allocator.lock(m_left);
+        m_allocator.lock(m_right);
+
+        RegisterSet inputAndOutputRegisters = RegisterSet(m_left, m_right, m_result);
+        RegisterSet reservedRegisters;
+        for (GPRReg reg : GPRInfo::reservedRegisters())
+            reservedRegisters.set(reg);
+
+        if (reservedRegisters.get(m_left))
+            m_left = m_allocator.allocateScratchGPR();
+        if (reservedRegisters.get(m_right)) {
+            if (m_origRight == m_origLeft)
+                m_right = m_left;
+            else
+                m_right = m_allocator.allocateScratchGPR();
+        }
+        if (reservedRegisters.get(m_result)) {
+            if (m_origResult == m_origLeft)
+                m_result = m_left;
+            else if (m_origResult == m_origRight)
+                m_result = m_right;
+            else
+                m_result = m_allocator.allocateScratchGPR();
+        }
+
+        if (!inputAndOutputRegisters.get(GPRInfo::tagMaskRegister))
+            m_savedTagMaskRegister = m_allocator.allocateScratchGPR();
+        if (!inputAndOutputRegisters.get(GPRInfo::tagTypeNumberRegister))
+            m_savedTagTypeNumberRegister = m_allocator.allocateScratchGPR();
+    }
+
+    void initializeRegisters(CCallHelpers& jit)
+    {
+        if (m_left != m_origLeft)
+            jit.move(m_origLeft, m_left);
+        if (m_right != m_origRight && m_origRight != m_origLeft)
+            jit.move(m_origRight, m_right);
+        
+        if (m_savedTagMaskRegister != InvalidGPRReg)
+            jit.move(GPRInfo::tagMaskRegister, m_savedTagMaskRegister);
+        if (m_savedTagTypeNumberRegister != InvalidGPRReg)
+            jit.move(GPRInfo::tagTypeNumberRegister, m_savedTagTypeNumberRegister);
+
+        jit.emitMaterializeTagCheckRegisters();
+    }
+
+    void restoreRegisters(CCallHelpers& jit)
+    {
+        if (m_origResult != m_result)
+            jit.move(m_result, m_origResult);
+        if (m_origLeft != m_left && m_origLeft != m_origResult)
+            jit.move(m_left, m_origLeft);
+        if (m_origRight != m_right && m_origRight != m_origResult && m_origRight != m_origLeft)
+            jit.move(m_right, m_origRight);
+
+        // We are guaranteed that the tag registers are not the same as the original input
+        // or output registers. Otherwise, we would not have allocated a scratch for them.
+    // Hence, we don't need to check for overlap like we do for the input registers.
+        if (m_savedTagMaskRegister != InvalidGPRReg) {
+            ASSERT(GPRInfo::tagMaskRegister != m_origLeft);
+            ASSERT(GPRInfo::tagMaskRegister != m_origRight);
+            ASSERT(GPRInfo::tagMaskRegister != m_origResult);
+            jit.move(m_savedTagMaskRegister, GPRInfo::tagMaskRegister);
+        }
+        if (m_savedTagTypeNumberRegister != InvalidGPRReg) {
+            ASSERT(GPRInfo::tagTypeNumberRegister != m_origLeft);
+            ASSERT(GPRInfo::tagTypeNumberRegister != m_origRight);
+            ASSERT(GPRInfo::tagTypeNumberRegister != m_origResult);
+            jit.move(m_savedTagTypeNumberRegister, GPRInfo::tagTypeNumberRegister);
+        }
+    }
+
+private:
+    ScratchRegisterAllocator& m_allocator;
+
+    GPRReg& m_result;
+    GPRReg& m_left;
+    GPRReg& m_right;
+
+    GPRReg m_origResult;
+    GPRReg m_origLeft;
+    GPRReg m_origRight;
+
+    GPRReg m_savedTagMaskRegister { InvalidGPRReg };
+    GPRReg m_savedTagTypeNumberRegister { InvalidGPRReg };
+};
+
+void generateArithSubFastPath(BinaryOpDescriptor& ic, CCallHelpers& jit,
+    GPRReg result, GPRReg left, GPRReg right, RegisterSet usedRegisters,
+    CCallHelpers::Jump& done, CCallHelpers::Jump& slowPathStart)
+{
+    ASSERT(ic.nodeType() == ArithSub);
+    ScratchRegisterAllocator allocator(usedRegisters);
+
+    BinarySnippetRegisterContext context(allocator, result, left, right);
+
+    GPRReg scratchGPR = allocator.allocateScratchGPR();
+    FPRReg leftFPR = allocator.allocateScratchFPR();
+    FPRReg rightFPR = allocator.allocateScratchFPR();
+    FPRReg scratchFPR = InvalidFPRReg;
+
+    JITSubGenerator gen(ic.leftOperand(), ic.rightOperand(), JSValueRegs(result),
+        JSValueRegs(left), JSValueRegs(right), leftFPR, rightFPR, scratchGPR, scratchFPR);
+
+    auto numberOfBytesUsedToPreserveReusedRegisters =
+    allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
+
+    context.initializeRegisters(jit);
+    gen.generateFastPath(jit);
+
+    ASSERT(gen.didEmitFastPath());
+    gen.endJumpList().link(&jit);
+    context.restoreRegisters(jit);
+    allocator.restoreReusedRegistersByPopping(jit, numberOfBytesUsedToPreserveReusedRegisters,
+        ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+    done = jit.jump();
+
+    gen.slowPathJumpList().link(&jit);
+    context.restoreRegisters(jit);
+    allocator.restoreReusedRegistersByPopping(jit, numberOfBytesUsedToPreserveReusedRegisters,
+        ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+    slowPathStart = jit.jump();
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
diff --git a/Source/JavaScriptCore/ftl/FTLCompileBinaryOp.h b/Source/JavaScriptCore/ftl/FTLCompileBinaryOp.h
new file mode 100644 (file)
index 0000000..a8549c5
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FTLCompileBinaryOp_h
+#define FTLCompileBinaryOp_h
+
+#if ENABLE(FTL_JIT)
+
+#include "CCallHelpers.h"
+
+namespace JSC {
+namespace FTL {
+
+class BinaryOpDescriptor;
+
+void generateArithSubFastPath(BinaryOpDescriptor&, CCallHelpers&,
+    GPRReg result, GPRReg left, GPRReg right, RegisterSet usedRegisters,
+    CCallHelpers::Jump& done, CCallHelpers::Jump& slowPathStart);
+
+} // namespace FTL
+} // namespace JSC
+
+#endif // ENABLE(FTL_JIT)
+
+#endif // FTLCompileBinaryOp_h
index 50fad72..e819063 100644 (file)
 #if ENABLE(FTL_JIT)
 
 #include "CodeOrigin.h"
+#include "DFGAbstractValue.h"
 #include "FTLLazySlowPath.h"
 #include "JITInlineCacheGenerator.h"
 #include "MacroAssembler.h"
+#include "SnippetOperand.h"
 #include <wtf/text/UniquedStringImpl.h>
 
 namespace JSC { namespace FTL {
@@ -123,23 +125,50 @@ public:
     Vector<CheckInGenerator> m_generators;
 };
 
-class ArithSubDescriptor : public InlineCacheDescriptor {
+class BinaryOpDescriptor : public InlineCacheDescriptor {
 public:
-    ArithSubDescriptor(unsigned stackmapID, CodeOrigin codeOrigin, ResultType leftType, ResultType rightType)
+    typedef EncodedJSValue (*SlowPathFunction)(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+
+    unsigned nodeType() const { return m_nodeType; }
+    size_t size() const { return m_size; }
+    const char* name() const { return m_name; }
+    const char* fastPathICName() const { return m_fastPathICName; }
+    SlowPathFunction slowPathFunction() const { return m_slowPathFunction; }
+
+    SnippetOperand leftOperand() { return m_leftOperand; }
+    SnippetOperand rightOperand() { return m_rightOperand; }
+
+    Vector<MacroAssembler::Label> m_slowPathStarts;
+
+protected:
+    BinaryOpDescriptor(unsigned nodeType, unsigned stackmapID, CodeOrigin codeOrigin,
+        size_t size, const char* name, const char* fastPathICName,
+        SlowPathFunction slowPathFunction, const SnippetOperand& leftOperand, const SnippetOperand& rightOperand)
         : InlineCacheDescriptor(stackmapID, codeOrigin, nullptr)
-        , m_leftType(leftType)
-        , m_rightType(rightType)
+        , m_nodeType(nodeType)
+        , m_size(size)
+        , m_name(name)
+        , m_fastPathICName(fastPathICName)
+        , m_slowPathFunction(slowPathFunction)
+        , m_leftOperand(leftOperand)
+        , m_rightOperand(rightOperand)
     {
     }
 
-    ResultType leftType() const { return m_leftType; }
-    ResultType rightType() const { return m_rightType; }
-    
-    Vector<MacroAssembler::Label> m_slowPathStarts;
+    unsigned m_nodeType;
+    size_t m_size;
+    const char* m_name;
+    const char* m_fastPathICName;
+    SlowPathFunction m_slowPathFunction;
 
-private:
-    ResultType m_leftType;
-    ResultType m_rightType;
+    SnippetOperand m_leftOperand;
+    SnippetOperand m_rightOperand;
+};
+
+class ArithSubDescriptor : public BinaryOpDescriptor {
+public:
+    ArithSubDescriptor(unsigned stackmapID, CodeOrigin, const SnippetOperand& leftOperand, const SnippetOperand& rightOperand);
+    static size_t icSize();
 };
 
 // You can create a lazy slow path call in lowerDFGToLLVM by doing:
diff --git a/Source/JavaScriptCore/ftl/FTLInlineCacheDescriptorInlines.h b/Source/JavaScriptCore/ftl/FTLInlineCacheDescriptorInlines.h
new file mode 100644 (file)
index 0000000..3b34206
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FTLInlineCacheDescriptorInlines_h
+#define FTLInlineCacheDescriptorInlines_h
+
+#if ENABLE(FTL_JIT)
+
+#include "DFGNodeType.h"
+#include "DFGOperations.h"
+#include "FTLInlineCacheDescriptor.h"
+#include "FTLInlineCacheSize.h"
+
+namespace JSC { namespace FTL {
+
+ArithSubDescriptor::ArithSubDescriptor(unsigned stackmapID, CodeOrigin codeOrigin,
+    const SnippetOperand& leftOperand, const SnippetOperand& rightOperand)
+    : BinaryOpDescriptor(DFG::ArithSub, stackmapID, codeOrigin, icSize(),
+        "ArithSub", "ArithSub IC fast path", DFG::operationValueSub, leftOperand, rightOperand)
+{
+}
+
+size_t ArithSubDescriptor::icSize()
+{
+    return sizeOfArithSub();
+}
+
+} } // namespace JSC::FTL
+
+#endif // ENABLE(FTL_JIT)
+
+#endif // FTLInlineCacheDescriptorInlines_h
index 4adc1aa..eac0e72 100644 (file)
@@ -1655,20 +1655,21 @@ private:
             // Arguments: id, bytes, target, numArgs, args...
             StackmapArgumentList arguments;
             arguments.append(m_out.constInt64(stackmapID));
-            arguments.append(m_out.constInt32(sizeOfArithSub()));
+            arguments.append(m_out.constInt32(ArithSubDescriptor::icSize()));
             arguments.append(constNull(m_out.ref8));
             arguments.append(m_out.constInt32(2));
             arguments.append(left);
             arguments.append(right);
 
-            appendOSRExitArgumentsForPatchpointIfWillCatchException(arguments, ExceptionType::SubGenerator, 3); // left, right, and result show up in the stackmap locations.
+            appendOSRExitArgumentsForPatchpointIfWillCatchException(arguments,
+                ExceptionType::BinaryOpGenerator, 3); // left, right, and result show up in the stackmap locations.
 
             LValue call = m_out.call(m_out.int64, m_out.patchpointInt64Intrinsic(), arguments);
             setInstructionCallingConvention(call, LLVMAnyRegCallConv);
 
-            m_ftlState.arithSubs.append(ArithSubDescriptor(stackmapID, m_node->origin.semantic,
-                abstractValue(m_node->child1()).resultType(),
-                abstractValue(m_node->child2()).resultType()));
+            SnippetOperand leftOperand(abstractValue(m_node->child1()).resultType());
+            SnippetOperand rightOperand(abstractValue(m_node->child2()).resultType());
+            m_ftlState.binaryOps.append(ArithSubDescriptor(stackmapID, m_node->origin.semantic, leftOperand, rightOperand));
 
             setJSValue(call);
 #endif
index b607134..3194da5 100644 (file)
@@ -172,7 +172,7 @@ bool OSRExit::willArriveAtExitFromIndirectExceptionCheck() const
     case ExceptionType::GetById:
     case ExceptionType::PutById:
     case ExceptionType::LazySlowPath:
-    case ExceptionType::SubGenerator:
+    case ExceptionType::BinaryOpGenerator:
     case ExceptionType::GetByIdCallOperation:
     case ExceptionType::PutByIdCallOperation:
         return true;
@@ -200,7 +200,7 @@ bool OSRExit::willArriveAtOSRExitFromCallOperation() const
     switch (m_exceptionType) {
     case ExceptionType::GetByIdCallOperation:
     case ExceptionType::PutByIdCallOperation:
-    case ExceptionType::SubGenerator:
+    case ExceptionType::BinaryOpGenerator:
         return true;
     default:
         return false;
index 32dfe92..d1341f9 100644 (file)
@@ -145,7 +145,7 @@ enum class ExceptionType : uint8_t {
     PutById,
     PutByIdCallOperation,
     LazySlowPath,
-    SubGenerator,
+    BinaryOpGenerator,
 };
 
 struct OSRExitDescriptor {
index 58ef276..b606f3c 100644 (file)
@@ -91,7 +91,7 @@ public:
     SegmentedVector<GetByIdDescriptor> getByIds;
     SegmentedVector<PutByIdDescriptor> putByIds;
     SegmentedVector<CheckInDescriptor> checkIns;
-    SegmentedVector<ArithSubDescriptor> arithSubs;
+    SegmentedVector<BinaryOpDescriptor> binaryOps;
     SegmentedVector<LazySlowPathDescriptor> lazySlowPaths;
 #if ENABLE(MASM_PROBE)
     SegmentedVector<ProbeDescriptor> probes;