Unreviewed, rolling out r172940.
author    commit-queue@webkit.org <commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Tue, 26 Aug 2014 16:46:10 +0000 (16:46 +0000)
committer commit-queue@webkit.org <commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Tue, 26 Aug 2014 16:46:10 +0000 (16:46 +0000)
https://bugs.webkit.org/show_bug.cgi?id=136256

Caused assertions on fast/storage/serialized-script-value.html, and
possibly flakiness on more tests (Requested by ap on #webkit).

Reverted changeset:

"FTL should be able to do polymorphic call inlining"
https://bugs.webkit.org/show_bug.cgi?id=135145
http://trac.webkit.org/changeset/172940

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@172961 268f45cc-cd09-0410-ab3c-d52691b4dbfc

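At a high level, the reverted patch taught the FTL to inline polymorphic call sites: the call edge profile (added by the files deleted below) records which callees a call site actually sees, and the compiler then emits an identity check plus an inlined body for each hot callee, with a generic call as the fallback. The following is a minimal standalone sketch of that dispatch shape in plain C++ rather than JIT-emitted code; the callee functions and counts are made up for illustration.

    #include <cstdio>
    #include <vector>

    using Callee = int (*)(int);

    static int addOne(int x) { return x + 1; }
    static int timesTwo(int x) { return x * 2; }

    // A call site specialized for the two hottest profiled callees.
    static int specializedCall(Callee f, int arg)
    {
        if (f == addOne)
            return arg + 1;    // inlined body of addOne
        if (f == timesTwo)
            return arg * 2;    // inlined body of timesTwo
        return f(arg);         // generic slow-path call for anything else
    }

    int main()
    {
        std::vector<Callee> callees { addOne, timesTwo };
        int sum = 0;
        for (int i = 0; i < 10; ++i)
            sum += specializedCall(callees[i % 2], 20);
        std::printf("sum = %d\n", sum);   // 5 * 21 + 5 * 40 = 305
        return 0;
    }

The profiling machinery that feeds this decision is what most of the deleted files below implemented.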
74 files changed:
LayoutTests/ChangeLog
LayoutTests/js/regress/script-tests/simple-poly-call-nested.js [deleted file]
LayoutTests/js/regress/script-tests/simple-poly-call.js [deleted file]
LayoutTests/js/regress/simple-poly-call-expected.txt [deleted file]
LayoutTests/js/regress/simple-poly-call-nested-expected.txt [deleted file]
LayoutTests/js/regress/simple-poly-call-nested.html [deleted file]
LayoutTests/js/regress/simple-poly-call.html [deleted file]
Source/JavaScriptCore/CMakeLists.txt
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/JavaScriptCore.vcxproj/JavaScriptCore.vcxproj
Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
Source/JavaScriptCore/bytecode/CallEdge.cpp [deleted file]
Source/JavaScriptCore/bytecode/CallEdge.h [deleted file]
Source/JavaScriptCore/bytecode/CallEdgeProfile.cpp [deleted file]
Source/JavaScriptCore/bytecode/CallEdgeProfile.h [deleted file]
Source/JavaScriptCore/bytecode/CallEdgeProfileInlines.h [deleted file]
Source/JavaScriptCore/bytecode/CallLinkInfo.cpp
Source/JavaScriptCore/bytecode/CallLinkInfo.h
Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
Source/JavaScriptCore/bytecode/CallLinkStatus.h
Source/JavaScriptCore/bytecode/CallVariant.cpp [deleted file]
Source/JavaScriptCore/bytecode/CallVariant.h [deleted file]
Source/JavaScriptCore/bytecode/CodeOrigin.h
Source/JavaScriptCore/bytecode/ExitKind.cpp
Source/JavaScriptCore/bytecode/ExitKind.h
Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
Source/JavaScriptCore/dfg/DFGBackwardsPropagationPhase.cpp
Source/JavaScriptCore/dfg/DFGBasicBlock.cpp
Source/JavaScriptCore/dfg/DFGBasicBlock.h
Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.cpp
Source/JavaScriptCore/dfg/DFGClobberize.h
Source/JavaScriptCore/dfg/DFGCommon.h
Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
Source/JavaScriptCore/dfg/DFGDoesGC.cpp
Source/JavaScriptCore/dfg/DFGDriver.cpp
Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
Source/JavaScriptCore/dfg/DFGGraph.cpp
Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
Source/JavaScriptCore/dfg/DFGLazyJSValue.cpp
Source/JavaScriptCore/dfg/DFGLazyJSValue.h
Source/JavaScriptCore/dfg/DFGNode.cpp
Source/JavaScriptCore/dfg/DFGNode.h
Source/JavaScriptCore/dfg/DFGNodeType.h
Source/JavaScriptCore/dfg/DFGPhantomCanonicalizationPhase.cpp
Source/JavaScriptCore/dfg/DFGPhantomRemovalPhase.cpp
Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
Source/JavaScriptCore/dfg/DFGSafeToExecute.h
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
Source/JavaScriptCore/dfg/DFGStructureRegistrationPhase.cpp
Source/JavaScriptCore/dfg/DFGTierUpCheckInjectionPhase.cpp
Source/JavaScriptCore/dfg/DFGValidate.cpp
Source/JavaScriptCore/dfg/DFGWatchpointCollectionPhase.cpp
Source/JavaScriptCore/ftl/FTLCapabilities.cpp
Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp
Source/JavaScriptCore/heap/Heap.cpp
Source/JavaScriptCore/jit/AssemblyHelpers.h
Source/JavaScriptCore/jit/CCallHelpers.h
Source/JavaScriptCore/jit/GPRInfo.h
Source/JavaScriptCore/jit/JITCall.cpp
Source/JavaScriptCore/jit/JITCall32_64.cpp
Source/JavaScriptCore/runtime/Options.h
Source/JavaScriptCore/runtime/VM.cpp
Source/JavaScriptCore/runtime/VM.h
Source/JavaScriptCore/tests/stress/new-array-then-exit.js [deleted file]
Source/JavaScriptCore/tests/stress/poly-call-exit-this.js [deleted file]
Source/JavaScriptCore/tests/stress/poly-call-exit.js [deleted file]
Source/WTF/ChangeLog
Source/WTF/wtf/OwnPtr.h
Source/WTF/wtf/Spectrum.h

index 01e4ec7..18ff61c 100644 (file)
@@ -1,3 +1,18 @@
+2014-08-26  Commit Queue  <commit-queue@webkit.org>
+
+        Unreviewed, rolling out r172940.
+        https://bugs.webkit.org/show_bug.cgi?id=136256
+
+        Caused assertions on fast/storage/serialized-script-
+        value.html, and possibly flakiness on more tests (Requested by
+        ap on #webkit).
+
+        Reverted changeset:
+
+        "FTL should be able to do polymorphic call inlining"
+        https://bugs.webkit.org/show_bug.cgi?id=135145
+        http://trac.webkit.org/changeset/172940
+
 2014-08-23  Filip Pizlo  <fpizlo@apple.com>
 
         FTL should be able to do polymorphic call inlining
diff --git a/LayoutTests/js/regress/script-tests/simple-poly-call-nested.js b/LayoutTests/js/regress/script-tests/simple-poly-call-nested.js
deleted file mode 100644 (file)
index b57e7d0..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-(function() {
-    function foo(x) { return 1; }
-    function bar(x) { return x; }
-    
-    var n = 1000000;
-    
-    var result = (function() {
-        var f = foo;
-        var g = bar;
-        
-        var result = 0;
-        for (var i = 0; i < n; ++i) {
-            result += f(42);
-            
-            var tmp = f;
-            f = g;
-            g = tmp;
-        }
-        
-        return result;
-    })();
-    
-    if (result != n / 2 * 42 + n / 2 * 1)
-        throw "Error: bad result: " + result;
-})();
diff --git a/LayoutTests/js/regress/script-tests/simple-poly-call.js b/LayoutTests/js/regress/script-tests/simple-poly-call.js
deleted file mode 100644 (file)
index 1fdfef5..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-(function() {
-    function foo(x) { return 1; }
-    function bar(x) { return x; }
-    
-    var f = foo;
-    var g = bar;
-    
-    var result = 0;
-    var n = 100000;
-    for (var i = 0; i < n; ++i) {
-        result += f(42);
-        
-        var tmp = f;
-        f = g;
-        g = tmp;
-    }
-    
-    if (result != n / 2 * 42 + n / 2 * 1)
-        throw "Error: bad result: " + result;
-})();
diff --git a/LayoutTests/js/regress/simple-poly-call-expected.txt b/LayoutTests/js/regress/simple-poly-call-expected.txt
deleted file mode 100644 (file)
index e12eebe..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-JSRegress/simple-poly-call
-
-On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-
-
-PASS no exception thrown
-PASS successfullyParsed is true
-
-TEST COMPLETE
-
diff --git a/LayoutTests/js/regress/simple-poly-call-nested-expected.txt b/LayoutTests/js/regress/simple-poly-call-nested-expected.txt
deleted file mode 100644 (file)
index 684ddfc..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-JSRegress/simple-poly-call-nested
-
-On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-
-
-PASS no exception thrown
-PASS successfullyParsed is true
-
-TEST COMPLETE
-
diff --git a/LayoutTests/js/regress/simple-poly-call-nested.html b/LayoutTests/js/regress/simple-poly-call-nested.html
deleted file mode 100644 (file)
index 6a98e81..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
-<html>
-<head>
-<script src="../../resources/js-test-pre.js"></script>
-</head>
-<body>
-<script src="../../resources/regress-pre.js"></script>
-<script src="script-tests/simple-poly-call-nested.js"></script>
-<script src="../../resources/regress-post.js"></script>
-<script src="../../resources/js-test-post.js"></script>
-</body>
-</html>
diff --git a/LayoutTests/js/regress/simple-poly-call.html b/LayoutTests/js/regress/simple-poly-call.html
deleted file mode 100644 (file)
index 6389320..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
-<html>
-<head>
-<script src="../../resources/js-test-pre.js"></script>
-</head>
-<body>
-<script src="../../resources/regress-pre.js"></script>
-<script src="script-tests/simple-poly-call.js"></script>
-<script src="../../resources/regress-post.js"></script>
-<script src="../../resources/js-test-post.js"></script>
-</body>
-</html>
index 60304ac..50023fb 100644 (file)
@@ -65,11 +65,8 @@ set(JavaScriptCore_SOURCES
     bytecode/ArrayProfile.cpp
     bytecode/BytecodeBasicBlock.cpp
     bytecode/BytecodeLivenessAnalysis.cpp
-    bytecode/CallEdge.cpp
-    bytecode/CallEdgeProfile.cpp
     bytecode/CallLinkInfo.cpp
     bytecode/CallLinkStatus.cpp
-    bytecode/CallVariant.cpp
     bytecode/CodeBlock.cpp
     bytecode/CodeBlockHash.cpp
     bytecode/CodeBlockJettisoningWatchpoint.cpp
index 00e3662..1e22d35 100644 (file)
@@ -1,3 +1,18 @@
+2014-08-26  Commit Queue  <commit-queue@webkit.org>
+
+        Unreviewed, rolling out r172940.
+        https://bugs.webkit.org/show_bug.cgi?id=136256
+
+        Caused assertions on fast/storage/serialized-script-
+        value.html, and possibly flakiness on more tests (Requested by
+        ap on #webkit).
+
+        Reverted changeset:
+
+        "FTL should be able to do polymorphic call inlining"
+        https://bugs.webkit.org/show_bug.cgi?id=135145
+        http://trac.webkit.org/changeset/172940
+
 2014-08-26  Michael Saboff  <msaboff@apple.com>
 
         REGRESSION(r172794) + 32Bit build: ASSERT failures in for-in-tests.js tests.
index 2d6fe91..700083b 100644 (file)
     <ClCompile Include="..\bytecode\ArrayProfile.cpp" />
     <ClCompile Include="..\bytecode\BytecodeBasicBlock.cpp" />
     <ClCompile Include="..\bytecode\BytecodeLivenessAnalysis.cpp" />
-    <ClCompile Include="..\bytecode\CallEdge.cpp" />
-    <ClCompile Include="..\bytecode\CallEdgeProfile.cpp" />
     <ClCompile Include="..\bytecode\CallLinkInfo.cpp" />
     <ClCompile Include="..\bytecode\CallLinkStatus.cpp" />
-    <ClCompile Include="..\bytecode\CallVariant.cpp" />
     <ClCompile Include="..\bytecode\CodeBlock.cpp" />
     <ClCompile Include="..\bytecode\CodeBlockHash.cpp" />
     <ClCompile Include="..\bytecode\CodeBlockJettisoningWatchpoint.cpp" />
     <ClInclude Include="..\bytecode\BytecodeBasicBlock.h" />
     <ClInclude Include="..\bytecode\BytecodeLivenessAnalysis.h" />
     <ClInclude Include="..\bytecode\BytecodeUseDef.h" />
-    <ClInclude Include="..\bytecode\CallEdge.h" />
-    <ClInclude Include="..\bytecode\CallEdgeProfile.h" />
-    <ClInclude Include="..\bytecode\CallEdgeProfileInlines.h" />
     <ClInclude Include="..\bytecode\CallLinkInfo.h" />
     <ClInclude Include="..\bytecode\CallLinkStatus.h" />
     <ClInclude Include="..\bytecode\CallReturnOffsetToBytecodeOffset.h" />
-    <ClInclude Include="..\bytecode\CallVariant.h" />
     <ClInclude Include="..\bytecode\CodeBlock.h" />
     <ClInclude Include="..\bytecode\CodeBlockHash.h" />
     <ClInclude Include="..\bytecode\CodeBlockJettisoningWatchpoint.h" />
index bea501d..60e6922 100644 (file)
                0F3B3A281544C997003ED0FF /* DFGCFGSimplificationPhase.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F3B3A251544C991003ED0FF /* DFGCFGSimplificationPhase.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F3B3A2B15475000003ED0FF /* DFGValidate.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F3B3A2915474FF4003ED0FF /* DFGValidate.cpp */; };
                0F3B3A2C15475002003ED0FF /* DFGValidate.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F3B3A2A15474FF4003ED0FF /* DFGValidate.h */; settings = {ATTRIBUTES = (Private, ); }; };
-               0F3B7E2619A11B8000D9BC56 /* CallEdge.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F3B7E2019A11B8000D9BC56 /* CallEdge.h */; settings = {ATTRIBUTES = (Private, ); }; };
-               0F3B7E2719A11B8000D9BC56 /* CallEdgeProfile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F3B7E2119A11B8000D9BC56 /* CallEdgeProfile.cpp */; };
-               0F3B7E2819A11B8000D9BC56 /* CallEdgeProfile.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F3B7E2219A11B8000D9BC56 /* CallEdgeProfile.h */; settings = {ATTRIBUTES = (Private, ); }; };
-               0F3B7E2919A11B8000D9BC56 /* CallEdgeProfileInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F3B7E2319A11B8000D9BC56 /* CallEdgeProfileInlines.h */; settings = {ATTRIBUTES = (Private, ); }; };
-               0F3B7E2A19A11B8000D9BC56 /* CallVariant.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F3B7E2419A11B8000D9BC56 /* CallVariant.cpp */; };
-               0F3B7E2B19A11B8000D9BC56 /* CallVariant.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F3B7E2519A11B8000D9BC56 /* CallVariant.h */; settings = {ATTRIBUTES = (Private, ); }; };
-               0F3B7E2D19A12AAE00D9BC56 /* CallEdge.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F3B7E2C19A12AAE00D9BC56 /* CallEdge.cpp */; };
                0F3D0BBC194A414300FC9CF9 /* ConstantStructureCheck.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F3D0BBA194A414300FC9CF9 /* ConstantStructureCheck.cpp */; };
                0F3D0BBD194A414300FC9CF9 /* ConstantStructureCheck.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F3D0BBB194A414300FC9CF9 /* ConstantStructureCheck.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F426A481460CBB300131F8F /* ValueRecovery.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F426A451460CBAB00131F8F /* ValueRecovery.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F3B3A251544C991003ED0FF /* DFGCFGSimplificationPhase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGCFGSimplificationPhase.h; path = dfg/DFGCFGSimplificationPhase.h; sourceTree = "<group>"; };
                0F3B3A2915474FF4003ED0FF /* DFGValidate.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGValidate.cpp; path = dfg/DFGValidate.cpp; sourceTree = "<group>"; };
                0F3B3A2A15474FF4003ED0FF /* DFGValidate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGValidate.h; path = dfg/DFGValidate.h; sourceTree = "<group>"; };
-               0F3B7E2019A11B8000D9BC56 /* CallEdge.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallEdge.h; sourceTree = "<group>"; };
-               0F3B7E2119A11B8000D9BC56 /* CallEdgeProfile.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallEdgeProfile.cpp; sourceTree = "<group>"; };
-               0F3B7E2219A11B8000D9BC56 /* CallEdgeProfile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallEdgeProfile.h; sourceTree = "<group>"; };
-               0F3B7E2319A11B8000D9BC56 /* CallEdgeProfileInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallEdgeProfileInlines.h; sourceTree = "<group>"; };
-               0F3B7E2419A11B8000D9BC56 /* CallVariant.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallVariant.cpp; sourceTree = "<group>"; };
-               0F3B7E2519A11B8000D9BC56 /* CallVariant.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallVariant.h; sourceTree = "<group>"; };
-               0F3B7E2C19A12AAE00D9BC56 /* CallEdge.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallEdge.cpp; sourceTree = "<group>"; };
                0F3D0BBA194A414300FC9CF9 /* ConstantStructureCheck.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConstantStructureCheck.cpp; sourceTree = "<group>"; };
                0F3D0BBB194A414300FC9CF9 /* ConstantStructureCheck.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ConstantStructureCheck.h; sourceTree = "<group>"; };
                0F426A451460CBAB00131F8F /* ValueRecovery.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ValueRecovery.h; sourceTree = "<group>"; };
                                0F666EBE183566F900D017F1 /* BytecodeLivenessAnalysisInlines.h */,
                                0F885E101849A3BE00F1E3FA /* BytecodeUseDef.h */,
                                0F8023E91613832300A0BA45 /* ByValInfo.h */,
-                               0F3B7E2C19A12AAE00D9BC56 /* CallEdge.cpp */,
-                               0F3B7E2019A11B8000D9BC56 /* CallEdge.h */,
-                               0F3B7E2119A11B8000D9BC56 /* CallEdgeProfile.cpp */,
-                               0F3B7E2219A11B8000D9BC56 /* CallEdgeProfile.h */,
-                               0F3B7E2319A11B8000D9BC56 /* CallEdgeProfileInlines.h */,
                                0F0B83AE14BCF71400885B4F /* CallLinkInfo.cpp */,
                                0F0B83AF14BCF71400885B4F /* CallLinkInfo.h */,
                                0F93329314CA7DC10085F3C6 /* CallLinkStatus.cpp */,
                                0F93329414CA7DC10085F3C6 /* CallLinkStatus.h */,
                                0F0B83B814BCF95B00885B4F /* CallReturnOffsetToBytecodeOffset.h */,
-                               0F3B7E2419A11B8000D9BC56 /* CallVariant.cpp */,
-                               0F3B7E2519A11B8000D9BC56 /* CallVariant.h */,
                                969A07900ED1D3AE00F1F681 /* CodeBlock.cpp */,
                                969A07910ED1D3AE00F1F681 /* CodeBlock.h */,
                                0F8F943D1667632D00D61971 /* CodeBlockHash.cpp */,
                                0FFFC95A14EF90A900C72532 /* DFGCSEPhase.h in Headers */,
                                0F2FC77316E12F740038D976 /* DFGDCEPhase.h in Headers */,
                                0F8F2B9A172F0501007DBDA5 /* DFGDesiredIdentifiers.h in Headers */,
-                               0F3B7E2819A11B8000D9BC56 /* CallEdgeProfile.h in Headers */,
                                C2C0F7CE17BBFC5B00464FE4 /* DFGDesiredTransitions.h in Headers */,
                                0FE8534C1723CDA500B618F5 /* DFGDesiredWatchpoints.h in Headers */,
                                C2981FD917BAEE4B00A3BC98 /* DFGDesiredWeakReferences.h in Headers */,
                                0F766D3115AA8112008F363E /* JITStubRoutine.h in Headers */,
                                0F766D2C15A8CC3A008F363E /* JITStubRoutineSet.h in Headers */,
                                14C5242B0F5355E900BA3D04 /* JITStubs.h in Headers */,
-                               0F3B7E2B19A11B8000D9BC56 /* CallVariant.h in Headers */,
                                FEF6835E174343CC00A32E25 /* JITStubsARM.h in Headers */,
                                FEF6835F174343CC00A32E25 /* JITStubsARMv7.h in Headers */,
                                FEF68361174343CC00A32E25 /* JITStubsX86.h in Headers */,
                                A76F54A313B28AAB00EF2BCE /* JITWriteBarrier.h in Headers */,
                                BC18C4160E16F5CD00B34460 /* JSActivation.h in Headers */,
                                840480131021A1D9008E7F01 /* JSAPIValueWrapper.h in Headers */,
-                               0F3B7E2919A11B8000D9BC56 /* CallEdgeProfileInlines.h in Headers */,
                                C2CF39C216E15A8100DD69BE /* JSAPIWrapperObject.h in Headers */,
                                A76140D2182982CB00750624 /* JSArgumentsIterator.h in Headers */,
                                BC18C4170E16F5CD00B34460 /* JSArray.h in Headers */,
                                E49DC16C12EF294E00184A1F /* SourceProviderCache.h in Headers */,
                                E49DC16D12EF295300184A1F /* SourceProviderCacheItem.h in Headers */,
                                0FB7F39E15ED8E4600F167B2 /* SparseArrayValueMap.h in Headers */,
-                               0F3B7E2619A11B8000D9BC56 /* CallEdge.h in Headers */,
                                A7386554118697B400540279 /* SpecializedThunkJIT.h in Headers */,
                                0F5541B21613C1FB00CE3E25 /* SpecialPointer.h in Headers */,
                                0FD82E54141DAEEE00179C94 /* SpeculatedType.h in Headers */,
                                0F235BD517178E1C00690C7F /* FTLExitArgumentForOperand.cpp in Sources */,
                                0F235BD817178E1C00690C7F /* FTLExitThunkGenerator.cpp in Sources */,
                                0F235BDA17178E1C00690C7F /* FTLExitValue.cpp in Sources */,
-                               0F3B7E2719A11B8000D9BC56 /* CallEdgeProfile.cpp in Sources */,
                                A7F2996B17A0BB670010417A /* FTLFail.cpp in Sources */,
                                0FD8A31917D51F2200CA2C40 /* FTLForOSREntryJITCode.cpp in Sources */,
                                0F25F1AF181635F300522F39 /* FTLInlineCacheSize.cpp in Sources */,
                                0F4680D214BBD16500BFE272 /* LLIntData.cpp in Sources */,
                                0F38B01117CF078000B144D3 /* LLIntEntrypoint.cpp in Sources */,
                                0F4680A814BA7FAB00BFE272 /* LLIntExceptions.cpp in Sources */,
-                               0F3B7E2D19A12AAE00D9BC56 /* CallEdge.cpp in Sources */,
                                0F4680A414BA7F8D00BFE272 /* LLIntSlowPaths.cpp in Sources */,
                                0F0B839C14BCF46300885B4F /* LLIntThunks.cpp in Sources */,
                                0FCEFACD1805E75500472CE4 /* LLVMAPI.cpp in Sources */,
                                14E84FA014EE1ACC00D6D5D4 /* WeakSet.cpp in Sources */,
                                2A4EC90B1860D6C20094F782 /* WriteBarrierBuffer.cpp in Sources */,
                                0FC8150B14043C0E00CFA603 /* WriteBarrierSupport.cpp in Sources */,
-                               0F3B7E2A19A11B8000D9BC56 /* CallVariant.cpp in Sources */,
                                A7E5AB3A1799E4B200D2833D /* X86Disassembler.cpp in Sources */,
                                863C6D9C1521111A00585E4E /* YarrCanonicalizeUCS2.cpp in Sources */,
                                86704B8412DBA33700A9FE7B /* YarrInterpreter.cpp in Sources */,
diff --git a/Source/JavaScriptCore/bytecode/CallEdge.cpp b/Source/JavaScriptCore/bytecode/CallEdge.cpp
deleted file mode 100644 (file)
index dffff6d..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#include "config.h"
-#include "CallEdge.h"
-
-namespace JSC {
-
-void CallEdge::dump(PrintStream& out) const
-{
-    out.print("<", m_callee, ", count: ", m_count, ">");
-}
-
-} // namespace JSC
-
diff --git a/Source/JavaScriptCore/bytecode/CallEdge.h b/Source/JavaScriptCore/bytecode/CallEdge.h
deleted file mode 100644 (file)
index 7288492..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef CallEdge_h
-#define CallEdge_h
-
-#include "CallVariant.h"
-
-namespace JSC {
-
-typedef uint16_t CallEdgeCountType;
-
-class CallEdge {
-public:
-    CallEdge();
-    CallEdge(CallVariant, CallEdgeCountType);
-    
-    bool operator!() const { return !m_callee; }
-    
-    CallVariant callee() const { return m_callee; }
-    CallEdgeCountType count() const { return m_count; }
-    
-    CallEdge despecifiedClosure() const
-    {
-        return CallEdge(m_callee.despecifiedClosure(), m_count);
-    }
-    
-    void dump(PrintStream&) const;
-    
-public:
-    CallVariant m_callee;
-    CallEdgeCountType m_count;
-};
-
-inline CallEdge::CallEdge(CallVariant callee, CallEdgeCountType count)
-    : m_callee(callee)
-    , m_count(count)
-{
-}
-
-inline CallEdge::CallEdge()
-    : CallEdge(CallVariant(), 0)
-{
-}
-
-typedef Vector<CallEdge, 1> CallEdgeList;
-
-} // namespace JSC
-
-#endif // CallEdge_h
-
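CallEdge pairs a callee (a CallVariant) with a 16-bit count, and the profile hands consumers a CallEdgeList sorted hottest-first. Below is an illustrative stand-alone analogue of how such a list is sorted and summed, mirroring callEdges() and numCallsToKnownCells() in the CallEdgeProfile.cpp diff that follows; the Edge type and the values here are stand-ins, not the JSC types.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    struct Edge {
        std::string callee;
        uint16_t count;
    };

    int main()
    {
        std::vector<Edge> edges { { "foo", 120 }, { "bar", 3 }, { "baz", 77 } };

        // Sort hottest-first, as callEdges() does before returning its list.
        std::sort(edges.begin(), edges.end(),
                  [](const Edge& a, const Edge& b) { return a.count > b.count; });

        // Sum the counts, as numCallsToKnownCells() does.
        unsigned total = 0;
        for (const Edge& e : edges)
            total += e.count;

        std::printf("hottest=%s total=%u\n", edges.front().callee.c_str(), total);
        return 0;
    }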
diff --git a/Source/JavaScriptCore/bytecode/CallEdgeProfile.cpp b/Source/JavaScriptCore/bytecode/CallEdgeProfile.cpp
deleted file mode 100644 (file)
index 24eaed4..0000000
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#include "config.h"
-#include "CallEdgeProfile.h"
-
-#include "CCallHelpers.h"
-#include "CallEdgeProfileInlines.h"
-#include "JITOperations.h"
-#include "JSCInlines.h"
-
-namespace JSC {
-
-CallEdgeList CallEdgeProfile::callEdges() const
-{
-    ConcurrentJITLocker locker(m_lock);
-    
-    CallEdgeList result;
-    
-    CallVariant primaryCallee = m_primaryCallee;
-    CallEdgeCountType numCallsToPrimary = m_numCallsToPrimary;
-    // Defend against races. These fields are modified by the log processor without locking.
-    if (!!primaryCallee && numCallsToPrimary)
-        result.append(CallEdge(primaryCallee, numCallsToPrimary));
-    
-    if (m_otherCallees) {
-        // Make sure that if the primary thread had just created a m_otherCalles while log
-        // processing, we see a consistently created one. The lock being held is insufficient for
-        // this, since the log processor will only grab the lock when merging the secondary
-        // spectrum into the primary one but may still create the data structure without holding
-        // locks.
-        WTF::loadLoadFence();
-        for (CallEdge& entry : m_otherCallees->m_processed) {
-            // Defend against the possibility that the primary duplicates an entry in the secondary
-            // spectrum. That can happen when the GC removes the primary. We could have the GC fix
-            // the situation by changing the primary to be something from the secondary spectrum,
-            // but this fix seems simpler to implement and also cheaper.
-            if (entry.callee() == result[0].callee()) {
-                result[0] = CallEdge(result[0].callee(), entry.count() + result[0].count());
-                continue;
-            }
-            
-            result.append(entry);
-        }
-    }
-    
-    std::sort(result.begin(), result.end(), [] (const CallEdge& a, const CallEdge& b) -> bool {
-            return a.count() > b.count();
-        });
-    
-    if (result.size() >= 2)
-        ASSERT(result[0].count() >= result.last().count());
-    
-    return result;
-}
-
-CallEdgeCountType CallEdgeProfile::numCallsToKnownCells() const
-{
-    CallEdgeCountType result = 0;
-    for (CallEdge& edge : callEdges())
-        result += edge.count();
-    return result;
-}
-
-static bool worthDespecifying(const CallVariant& variant)
-{
-    return !Heap::isMarked(variant.rawCalleeCell())
-        && Heap::isMarked(variant.despecifiedClosure().rawCalleeCell());
-}
-
-bool CallEdgeProfile::worthDespecifying()
-{
-    if (m_closuresAreDespecified)
-        return false;
-    
-    if (!!m_primaryCallee && !JSC::worthDespecifying(m_primaryCallee))
-        return false;
-    
-    if (m_otherCallees) {
-        for (unsigned i = m_otherCallees->m_processed.size(); i--;) {
-            if (!JSC::worthDespecifying(m_otherCallees->m_processed[i].callee()))
-                return false;
-        }
-    }
-    
-    return true;
-}
-
-void CallEdgeProfile::visitWeak()
-{
-    if (!m_primaryCallee && !m_otherCallees)
-        return;
-    
-    ConcurrentJITLocker locker(m_lock);
-    
-    // See if anything is dead and if that can be rectified by despecifying.
-    if (worthDespecifying()) {
-        CallSpectrum newSpectrum;
-        
-        if (!!m_primaryCallee)
-            newSpectrum.add(m_primaryCallee.despecifiedClosure(), m_numCallsToPrimary);
-        
-        if (m_otherCallees) {
-            for (unsigned i = m_otherCallees->m_processed.size(); i--;) {
-                newSpectrum.add(
-                    m_otherCallees->m_processed[i].callee().despecifiedClosure(),
-                    m_otherCallees->m_processed[i].count());
-            }
-        }
-        
-        Vector<CallSpectrum::KeyAndCount> list = newSpectrum.buildList();
-        ASSERT(list.size());
-        m_primaryCallee = list.last().key;
-        m_numCallsToPrimary = list.last().count;
-        
-        ASSERT(!!m_otherCallees == (list.size() >= 2));
-        if (m_otherCallees) {
-            m_otherCallees->m_processed.clear();
-            for (unsigned i = list.size() - 1; i--;)
-                m_otherCallees->m_processed.append(CallEdge(list[i].key, list[i].count));
-        }
-        
-        m_closuresAreDespecified = true;
-        
-        return;
-    }
-    
-    if (!!m_primaryCallee && !Heap::isMarked(m_primaryCallee.rawCalleeCell())) {
-        m_numCallsToUnknownCell += m_numCallsToPrimary;
-        
-        m_primaryCallee = CallVariant();
-        m_numCallsToPrimary = 0;
-    }
-    
-    if (m_otherCallees) {
-        for (unsigned i = 0; i < m_otherCallees->m_processed.size(); i++) {
-            if (Heap::isMarked(m_otherCallees->m_processed[i].callee().rawCalleeCell()))
-                continue;
-            
-            m_numCallsToUnknownCell += m_otherCallees->m_processed[i].count();
-            m_otherCallees->m_processed[i--] = m_otherCallees->m_processed.last();
-            m_otherCallees->m_processed.removeLast();
-        }
-        
-        // Only exists while we are processing the log.
-        RELEASE_ASSERT(!m_otherCallees->m_temporarySpectrum);
-    }
-}
-
-void CallEdgeProfile::addSlow(CallVariant callee, CallEdgeProfileVector& mergeBackLog)
-{
-    // This exists to handle cases where the spectrum wasn't created yet, or we're storing to a
-    // particular spectrum for the first time during a log processing iteration.
-    
-    if (!m_otherCallees) {
-        m_otherCallees = std::make_unique<Secondary>();
-        // If a compiler thread notices the m_otherCallees being non-null, we want to make sure
-        // that it sees a fully created one.
-        WTF::storeStoreFence();
-    }
-    
-    if (!m_otherCallees->m_temporarySpectrum) {
-        m_otherCallees->m_temporarySpectrum = std::make_unique<CallSpectrum>();
-        for (unsigned i = m_otherCallees->m_processed.size(); i--;) {
-            m_otherCallees->m_temporarySpectrum->add(
-                m_otherCallees->m_processed[i].callee(),
-                m_otherCallees->m_processed[i].count());
-        }
-        
-        // This means that this is the first time we're seeing this profile during this log
-        // processing iteration.
-        mergeBackLog.append(this);
-    }
-    
-    m_otherCallees->m_temporarySpectrum->add(callee);
-}
-
-void CallEdgeProfile::mergeBack()
-{
-    ConcurrentJITLocker locker(m_lock);
-    
-    ASSERT(m_otherCallees);
-    ASSERT(m_otherCallees->m_temporarySpectrum);
-    
-    if (!!m_primaryCallee)
-        m_otherCallees->m_temporarySpectrum->add(m_primaryCallee, m_numCallsToPrimary);
-    
-    if (!m_closuresAreDespecified) {
-        CallSpectrum newSpectrum;
-        for (auto& entry : *m_otherCallees->m_temporarySpectrum)
-            newSpectrum.add(entry.key.despecifiedClosure(), entry.value);
-        
-        if (newSpectrum.size() < m_otherCallees->m_temporarySpectrum->size()) {
-            *m_otherCallees->m_temporarySpectrum = newSpectrum;
-            m_closuresAreDespecified = true;
-        }
-    }
-    
-    Vector<CallSpectrum::KeyAndCount> list = m_otherCallees->m_temporarySpectrum->buildList();
-    m_otherCallees->m_temporarySpectrum = nullptr;
-    
-    m_primaryCallee = list.last().key;
-    m_numCallsToPrimary = list.last().count;
-    list.removeLast();
-    
-    m_otherCallees->m_processed.clear();
-    for (unsigned count = maxKnownCallees; count-- && !list.isEmpty();) {
-        m_otherCallees->m_processed.append(CallEdge(list.last().key, list.last().count));
-        list.removeLast();
-    }
-    
-    for (unsigned i = list.size(); i--;)
-        m_numCallsToUnknownCell += list[i].count;
-}
-
-void CallEdgeProfile::fadeByHalf()
-{
-    m_numCallsToPrimary >>= 1;
-    m_numCallsToNotCell >>= 1;
-    m_numCallsToUnknownCell >>= 1;
-    m_totalCount >>= 1;
-    
-    if (m_otherCallees) {
-        for (unsigned i = m_otherCallees->m_processed.size(); i--;) {
-            m_otherCallees->m_processed[i] = CallEdge(
-                m_otherCallees->m_processed[i].callee(),
-                m_otherCallees->m_processed[i].count() >> 1);
-        }
-        
-        if (m_otherCallees->m_temporarySpectrum) {
-            for (auto& entry : *m_otherCallees->m_temporarySpectrum)
-                entry.value >>= 1;
-        }
-    }
-}
-
-CallEdgeLog::CallEdgeLog()
-    : m_scaledLogIndex(logSize * sizeof(Entry))
-{
-    ASSERT(!(m_scaledLogIndex % sizeof(Entry)));
-}
-
-CallEdgeLog::~CallEdgeLog() { }
-
-bool CallEdgeLog::isEnabled()
-{
-    return Options::enableCallEdgeProfiling() && Options::useFTLJIT();
-}
-
-#if ENABLE(JIT)
-
-extern "C" JIT_OPERATION void operationProcessCallEdgeLog(CallEdgeLog*) WTF_INTERNAL;
-extern "C" JIT_OPERATION void operationProcessCallEdgeLog(CallEdgeLog* log)
-{
-    log->processLog();
-}
-
-void CallEdgeLog::emitLogCode(CCallHelpers& jit, CallEdgeProfile& profile, JSValueRegs calleeRegs)
-{
-    const unsigned numberOfArguments = 1;
-    
-    GPRReg scratchGPR;
-    if (!calleeRegs.uses(GPRInfo::regT0))
-        scratchGPR = GPRInfo::regT0;
-    else if (!calleeRegs.uses(GPRInfo::regT1))
-        scratchGPR = GPRInfo::regT1;
-    else
-        scratchGPR = GPRInfo::regT2;
-    
-    jit.load32(&m_scaledLogIndex, scratchGPR);
-    
-    CCallHelpers::Jump ok = jit.branchTest32(CCallHelpers::NonZero, scratchGPR);
-    
-    ASSERT_UNUSED(numberOfArguments, stackAlignmentRegisters() >= 1 + numberOfArguments);
-    
-    jit.subPtr(CCallHelpers::TrustedImm32(stackAlignmentBytes()), CCallHelpers::stackPointerRegister);
-    
-    jit.storeValue(calleeRegs, CCallHelpers::Address(CCallHelpers::stackPointerRegister, sizeof(JSValue)));
-    jit.setupArguments(CCallHelpers::TrustedImmPtr(this));
-    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(operationProcessCallEdgeLog)), GPRInfo::nonArgGPR0);
-    jit.call(GPRInfo::nonArgGPR0);
-    jit.loadValue(CCallHelpers::Address(CCallHelpers::stackPointerRegister, sizeof(JSValue)), calleeRegs);
-    
-    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentBytes()), CCallHelpers::stackPointerRegister);
-    
-    jit.move(CCallHelpers::TrustedImm32(logSize * sizeof(Entry)), scratchGPR);
-    ok.link(&jit);
-
-    jit.sub32(CCallHelpers::TrustedImm32(sizeof(Entry)), scratchGPR);
-    jit.store32(scratchGPR, &m_scaledLogIndex);
-    jit.addPtr(CCallHelpers::TrustedImmPtr(m_log), scratchGPR);
-    jit.storeValue(calleeRegs, CCallHelpers::Address(scratchGPR, OBJECT_OFFSETOF(Entry, m_value)));
-    jit.storePtr(CCallHelpers::TrustedImmPtr(&profile), CCallHelpers::Address(scratchGPR, OBJECT_OFFSETOF(Entry, m_profile)));
-}
-
-void CallEdgeLog::emitLogCode(
-    CCallHelpers& jit, OwnPtr<CallEdgeProfile>& profilePointer, JSValueRegs calleeRegs)
-{
-    if (!isEnabled())
-        return;
-    
-    profilePointer.createTransactionally();
-    emitLogCode(jit, *profilePointer, calleeRegs);
-}
-
-#endif // ENABLE(JIT)
-
-void CallEdgeLog::processLog()
-{
-    ASSERT(!(m_scaledLogIndex % sizeof(Entry)));
-    
-    if (Options::callEdgeProfileReallyProcessesLog()) {
-        CallEdgeProfileVector mergeBackLog;
-        
-        for (unsigned i = m_scaledLogIndex / sizeof(Entry); i < logSize; ++i)
-            m_log[i].m_profile->add(m_log[i].m_value, mergeBackLog);
-        
-        for (unsigned i = mergeBackLog.size(); i--;)
-            mergeBackLog[i]->mergeBack();
-    }
-    
-    m_scaledLogIndex = logSize * sizeof(Entry);
-}
-
-} // namespace JSC
-
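The comments in callEdges() and addSlow() above describe a publish/consume discipline: the secondary structure is fully constructed before its pointer becomes visible (WTF::storeStoreFence on the writer side), and readers fence after observing the pointer (WTF::loadLoadFence) so a non-null pointer implies a fully built object. A sketch of the same pattern in standard C++, using release/acquire atomics as the portable analogue; the Secondary struct here is illustrative only.

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct Secondary {
        std::vector<int> processed { 1, 2, 3 };
    };

    std::atomic<Secondary*> g_other { nullptr };

    void writer()
    {
        auto* s = new Secondary;                      // fully construct first...
        g_other.store(s, std::memory_order_release);  // ...then publish the pointer
    }

    void reader()
    {
        Secondary* s = g_other.load(std::memory_order_acquire);
        if (s)                                        // non-null implies fully constructed
            std::printf("saw %zu entries\n", s->processed.size());
    }

    int main()
    {
        std::thread w(writer), r(reader);
        w.join();
        r.join();
        delete g_other.load();
        return 0;
    }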
diff --git a/Source/JavaScriptCore/bytecode/CallEdgeProfile.h b/Source/JavaScriptCore/bytecode/CallEdgeProfile.h
deleted file mode 100644 (file)
index bafd5a2..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef CallEdgeProfile_h
-#define CallEdgeProfile_h
-
-#include "CallEdge.h"
-#include "CallVariant.h"
-#include "ConcurrentJITLock.h"
-#include "JSCell.h"
-#include <wtf/OwnPtr.h>
-
-namespace JSC {
-
-class CCallHelpers;
-class LLIntOffsetsExtractor;
-
-class CallEdgeLog;
-class CallEdgeProfile;
-typedef Vector<CallEdgeProfile*, 10> CallEdgeProfileVector;
-
-class CallEdgeProfile {
-public:
-    CallEdgeProfile();
-    
-    CallEdgeCountType numCallsToNotCell() const { return m_numCallsToNotCell; }
-    CallEdgeCountType numCallsToUnknownCell() const { return m_numCallsToUnknownCell; }
-    CallEdgeCountType numCallsToKnownCells() const;
-    
-    CallEdgeCountType totalCalls() const { return m_totalCount; }
-    
-    // Call while holding the owning CodeBlock's lock.
-    CallEdgeList callEdges() const;
-    
-    void visitWeak();
-    
-private:
-    friend class CallEdgeLog;
-    
-    static const unsigned maxKnownCallees = 5;
-    
-    void add(JSValue, CallEdgeProfileVector& mergeBackLog);
-    
-    bool worthDespecifying();
-    void addSlow(CallVariant, CallEdgeProfileVector& mergeBackLog);
-    void mergeBack();
-    void fadeByHalf();
-    
-    // It's cheaper to let this have its own lock. It needs to be able to find which lock to
-    // lock. Normally it would lock the owning CodeBlock's lock, but that would require a
-    // pointer-width word to point at the CodeBlock. Having the byte-sized lock here is
-    // cheaper. However, this means that the relationship with the CodeBlock lock is:
-    // acquire the CodeBlock lock before this one.
-    mutable ConcurrentJITLock m_lock;
-    
-    bool m_closuresAreDespecified;
-    
-    CallEdgeCountType m_numCallsToPrimary;
-    CallEdgeCountType m_numCallsToNotCell;
-    CallEdgeCountType m_numCallsToUnknownCell;
-    CallEdgeCountType m_totalCount;
-    CallVariant m_primaryCallee;
-    
-    typedef Spectrum<CallVariant, CallEdgeCountType> CallSpectrum;
-    
-    struct Secondary {
-        Vector<CallEdge> m_processed; // Processed but not necessarily sorted.
-        std::unique_ptr<CallSpectrum> m_temporarySpectrum;
-    };
-    
-    std::unique_ptr<Secondary> m_otherCallees;
-};
-
-class CallEdgeLog {
-public:
-    CallEdgeLog();
-    ~CallEdgeLog();
-
-    static bool isEnabled();
-    
-#if ENABLE(JIT)
-    void emitLogCode(CCallHelpers&, CallEdgeProfile&, JSValueRegs calleeRegs); // Assumes that stack is aligned, all volatile registers - other than calleeGPR - are clobberable, and the parameter space is in use.
-    
-    // Same as above but creates a CallEdgeProfile instance if one did not already exist. Does
-    // this in a thread-safe manner by calling OwnPtr::createTransactionally.
-    void emitLogCode(CCallHelpers&, OwnPtr<CallEdgeProfile>&, JSValueRegs calleeRegs);
-#endif // ENABLE(JIT)
-    
-    void processLog();
-    
-private:
-    friend class LLIntOffsetsExtractor;
-
-    static const unsigned logSize = 10000;
-    
-    struct Entry {
-        JSValue m_value;
-        CallEdgeProfile* m_profile;
-    };
-    
-    unsigned m_scaledLogIndex;
-    Entry m_log[logSize];
-};
-
-} // namespace JSC
-
-#endif // CallEdgeProfile_h
-
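CallEdgeLog is a bounded log: JIT-emitted code appends (callee value, profile) entries by decrementing a scaled index, and only when the index reaches zero does it call out to processLog() to fold the whole batch into the profiles, keeping the per-call fast path to a couple of stores. A simplified sketch of that design with an unscaled index and integer callee ids; the names and sizes here are illustrative, not the JSC layout.

    #include <array>
    #include <cstddef>
    #include <cstdio>
    #include <unordered_map>

    struct Profile {
        std::unordered_map<int, unsigned> counts;   // callee id -> call count
    };

    struct Entry {
        Profile* profile;
        int calleeId;
    };

    class BoundedLog {
    public:
        void record(Profile& profile, int calleeId)
        {
            if (!m_index)
                processLog();                        // slow path: drain, then reset
            m_log[--m_index] = { &profile, calleeId };
        }

        void processLog()
        {
            // Filled slots occupy [m_index, logSize); fold them into their profiles.
            for (std::size_t i = m_index; i < logSize; ++i)
                m_log[i].profile->counts[m_log[i].calleeId]++;
            m_index = logSize;
        }

    private:
        static constexpr std::size_t logSize = 8;
        std::size_t m_index = logSize;
        std::array<Entry, logSize> m_log {};
    };

    int main()
    {
        Profile p;
        BoundedLog log;
        for (int i = 0; i < 20; ++i)
            log.record(p, i % 3);
        log.processLog();                            // flush the partially filled tail
        for (auto& kv : p.counts)
            std::printf("callee %d: %u\n", kv.first, kv.second);
        return 0;
    }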
diff --git a/Source/JavaScriptCore/bytecode/CallEdgeProfileInlines.h b/Source/JavaScriptCore/bytecode/CallEdgeProfileInlines.h
deleted file mode 100644 (file)
index e6ea320..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef CallEdgeProfileInlines_h
-#define CallEdgeProfileInlines_h
-
-#include "CallEdgeProfile.h"
-
-namespace JSC {
-
-inline CallEdgeProfile::CallEdgeProfile()
-    : m_closuresAreDespecified(false)
-    , m_numCallsToPrimary(0)
-    , m_numCallsToNotCell(0)
-    , m_numCallsToUnknownCell(0)
-    , m_totalCount(0)
-    , m_primaryCallee(nullptr)
-{
-}
-
-ALWAYS_INLINE void CallEdgeProfile::add(JSValue value, CallEdgeProfileVector& mergeBackLog)
-{
-    unsigned newTotalCount = m_totalCount + 1;
-    if (UNLIKELY(!newTotalCount)) {
-        fadeByHalf(); // Tackle overflows by dividing all counts by two.
-        newTotalCount = m_totalCount + 1;
-    }
-    ASSERT(newTotalCount);
-    m_totalCount = newTotalCount;
-    
-    if (UNLIKELY(!value.isCell())) {
-        m_numCallsToNotCell++;
-        return;
-    }
-
-    CallVariant callee = CallVariant(value.asCell());
-    
-    if (m_closuresAreDespecified)
-        callee = callee.despecifiedClosure();
-    
-    if (UNLIKELY(!m_primaryCallee)) {
-        m_primaryCallee = callee;
-        m_numCallsToPrimary = 1;
-        return;
-    }
-        
-    if (LIKELY(m_primaryCallee == callee)) {
-        m_numCallsToPrimary++;
-        return;
-    }
-        
-    if (UNLIKELY(!m_otherCallees)) {
-        addSlow(callee, mergeBackLog);
-        return;
-    }
-        
-    CallSpectrum* secondary = m_otherCallees->m_temporarySpectrum.get();
-    if (!secondary) {
-        addSlow(callee, mergeBackLog);
-        return;
-    }
-        
-    secondary->add(callee);
-}
-
-} // namespace JSC
-
-#endif // CallEdgeProfileInlines_h
-
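add() above guards its 16-bit counters against overflow by halving everything (fadeByHalf) whenever the total would wrap, so recent calls dominate older ones. A self-contained sketch of that fading policy, simplified to a flat vector of counters rather than the primary/secondary split the profile actually uses.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct FadingCounters {
        std::vector<uint16_t> counts;
        uint16_t total = 0;

        void add(std::size_t index)
        {
            uint16_t newTotal = static_cast<uint16_t>(total + 1);
            if (!newTotal) {                          // the 16-bit total wrapped: halve everything
                for (auto& c : counts)
                    c = static_cast<uint16_t>(c >> 1);
                total = static_cast<uint16_t>(total >> 1);
                newTotal = static_cast<uint16_t>(total + 1);
            }
            total = newTotal;
            counts[index] = static_cast<uint16_t>(counts[index] + 1);
        }
    };

    int main()
    {
        FadingCounters c;
        c.counts.resize(2);
        for (int i = 0; i < 70000; ++i)               // enough to wrap a 16-bit total once
            c.add(i % 2);
        std::printf("total=%u a=%u b=%u\n", c.total, c.counts[0], c.counts[1]);
        return 0;
    }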
index c284c5b..93cb13d 100644 (file)
@@ -83,11 +83,6 @@ void CallLinkInfo::visitWeak(RepatchBuffer& repatchBuffer)
     }
     if (!!lastSeenCallee && !Heap::isMarked(lastSeenCallee.get()))
         lastSeenCallee.clear();
-    
-    if (callEdgeProfile) {
-        WTF::loadLoadFence();
-        callEdgeProfile->visitWeak();
-    }
 }
 
 CallLinkInfo& CallLinkInfo::dummy()
index 3a65ef5..88094ff 100644 (file)
@@ -26,7 +26,6 @@
 #ifndef CallLinkInfo_h
 #define CallLinkInfo_h
 
-#include "CallEdgeProfile.h"
 #include "ClosureCallStubRoutine.h"
 #include "CodeLocation.h"
 #include "CodeSpecializationKind.h"
@@ -34,7 +33,6 @@
 #include "JSFunction.h"
 #include "Opcode.h"
 #include "WriteBarrier.h"
-#include <wtf/OwnPtr.h>
 #include <wtf/SentinelLinkedList.h>
 
 namespace JSC {
@@ -90,7 +88,6 @@ struct CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> {
     unsigned calleeGPR : 8;
     unsigned slowPathCount;
     CodeOrigin codeOrigin;
-    OwnPtr<CallEdgeProfile> callEdgeProfile;
 
     bool isLinked() { return stub || callee; }
     void unlink(RepatchBuffer&);
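The OwnPtr<CallEdgeProfile> member removed above was created lazily from JIT code via OwnPtr::createTransactionally, so racing threads end up agreeing on a single profile. The sketch below shows what "create transactionally" plausibly means, assumed from the name and from how emitLogCode uses it rather than taken from WTF's implementation, using a compare-and-swap in standard C++.

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct Profile { int id; };

    std::atomic<Profile*> g_slot { nullptr };

    Profile* getOrCreate(int id)
    {
        Profile* existing = g_slot.load(std::memory_order_acquire);
        if (existing)
            return existing;
        Profile* fresh = new Profile { id };
        if (g_slot.compare_exchange_strong(existing, fresh,
                                           std::memory_order_acq_rel,
                                           std::memory_order_acquire))
            return fresh;          // we won the race; our object is the one everyone sees
        delete fresh;              // someone else won; discard ours and use theirs
        return existing;
    }

    int main()
    {
        std::vector<std::thread> threads;
        for (int i = 0; i < 4; ++i)
            threads.emplace_back([i] { getOrCreate(i); });
        for (auto& t : threads)
            t.join();
        std::printf("winner id=%d\n", g_slot.load()->id);
        delete g_slot.load();
        return 0;
    }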
index ecc7233..aa0e962 100644 (file)
 #include "LLIntCallLinkInfo.h"
 #include "JSCInlines.h"
 #include <wtf/CommaPrinter.h>
-#include <wtf/ListDump.h>
 
 namespace JSC {
 
 static const bool verbose = false;
 
 CallLinkStatus::CallLinkStatus(JSValue value)
-    : m_couldTakeSlowPath(false)
+    : m_callTarget(value)
+    , m_executable(0)
+    , m_couldTakeSlowPath(false)
     , m_isProved(false)
 {
-    if (!value || !value.isCell()) {
-        m_couldTakeSlowPath = true;
+    if (!value || !value.isCell())
         return;
-    }
     
-    m_edges.append(CallEdge(CallVariant(value.asCell()), 1));
+    if (!value.asCell()->inherits(JSFunction::info()))
+        return;
+    
+    m_executable = jsCast<JSFunction*>(value.asCell())->executable();
+}
+
+JSFunction* CallLinkStatus::function() const
+{
+    if (!m_callTarget || !m_callTarget.isCell())
+        return 0;
+    
+    if (!m_callTarget.asCell()->inherits(JSFunction::info()))
+        return 0;
+    
+    return jsCast<JSFunction*>(m_callTarget.asCell());
+}
+
+InternalFunction* CallLinkStatus::internalFunction() const
+{
+    if (!m_callTarget || !m_callTarget.isCell())
+        return 0;
+    
+    if (!m_callTarget.asCell()->inherits(InternalFunction::info()))
+        return 0;
+    
+    return jsCast<InternalFunction*>(m_callTarget.asCell());
+}
+
+Intrinsic CallLinkStatus::intrinsicFor(CodeSpecializationKind kind) const
+{
+    if (!m_executable)
+        return NoIntrinsic;
+    
+    return m_executable->intrinsicFor(kind);
 }
 
 CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, unsigned bytecodeIndex)
@@ -55,7 +87,7 @@ CallLinkStatus CallLinkStatus::computeFromLLInt(const ConcurrentJITLocker& locke
     UNUSED_PARAM(profiledBlock);
     UNUSED_PARAM(bytecodeIndex);
 #if ENABLE(DFG_JIT)
-    if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell))) {
+    if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadFunction))) {
         // We could force this to be a closure call, but instead we'll just assume that it
         // takes slow path.
         return takesSlowPath();
@@ -93,7 +125,7 @@ CallLinkStatus CallLinkStatus::computeFor(
     if (!callLinkInfo)
         return computeFromLLInt(locker, profiledBlock, bytecodeIndex);
     
-    return computeFor(locker, profiledBlock, *callLinkInfo, exitSiteData);
+    return computeFor(locker, *callLinkInfo, exitSiteData);
 #else
     return CallLinkStatus();
 #endif
@@ -107,10 +139,10 @@ CallLinkStatus::ExitSiteData CallLinkStatus::computeExitSiteData(
     
 #if ENABLE(DFG_JIT)
     exitSiteData.m_takesSlowPath =
-        profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadType, exitingJITType))
+        profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache, exitingJITType))
         || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable, exitingJITType));
     exitSiteData.m_badFunction =
-        profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell, exitingJITType));
+        profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadFunction, exitingJITType));
 #else
     UNUSED_PARAM(locker);
     UNUSED_PARAM(profiledBlock);
@@ -122,34 +154,7 @@ CallLinkStatus::ExitSiteData CallLinkStatus::computeExitSiteData(
 }
 
 #if ENABLE(JIT)
-CallLinkStatus CallLinkStatus::computeFor(
-    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo)
-{
-    // We don't really need this, but anytime we have to debug this code, it becomes indispensable.
-    UNUSED_PARAM(profiledBlock);
-    
-    if (Options::callStatusShouldUseCallEdgeProfile()) {
-        // Always trust the call edge profile over anything else since this has precise counts.
-        // It can make the best possible decision because it never "forgets" what happened for any
-        // call, with the exception of fading out the counts of old calls (for example if the
-        // counter type is 16-bit then calls that happened more than 2^16 calls ago are given half
-        // weight, and this compounds for every 2^15 [sic] calls after that). The combination of
-        // high fidelity for recent calls and fading for older calls makes this the most useful
-        // mechamism of choosing how to optimize future calls.
-        CallEdgeProfile* edgeProfile = callLinkInfo.callEdgeProfile.get();
-        WTF::loadLoadFence();
-        if (edgeProfile) {
-            CallLinkStatus result = computeFromCallEdgeProfile(edgeProfile);
-            if (!!result)
-                return result;
-        }
-    }
-    
-    return computeFromCallLinkInfo(locker, callLinkInfo);
-}
-
-CallLinkStatus CallLinkStatus::computeFromCallLinkInfo(
-    const ConcurrentJITLocker&, CallLinkInfo& callLinkInfo)
+CallLinkStatus CallLinkStatus::computeFor(const ConcurrentJITLocker&, CallLinkInfo& callLinkInfo)
 {
     // Note that despite requiring that the locker is held, this code is racy with respect
     // to the CallLinkInfo: it may get cleared while this code runs! This is because
@@ -172,7 +177,7 @@ CallLinkStatus CallLinkStatus::computeFromCallLinkInfo(
     
     JSFunction* target = callLinkInfo.lastSeenCallee.get();
     if (!target)
-        return takesSlowPath();
+        return CallLinkStatus();
     
     if (callLinkInfo.hasSeenClosure)
         return CallLinkStatus(target->executable());
@@ -180,43 +185,15 @@ CallLinkStatus CallLinkStatus::computeFromCallLinkInfo(
     return CallLinkStatus(target);
 }
 
-CallLinkStatus CallLinkStatus::computeFromCallEdgeProfile(CallEdgeProfile* edgeProfile)
+CallLinkStatus CallLinkStatus::computeFor(
+    const ConcurrentJITLocker& locker, CallLinkInfo& callLinkInfo, ExitSiteData exitSiteData)
 {
-    // In cases where the call edge profile saw nothing, use the CallLinkInfo instead.
-    if (!edgeProfile->totalCalls())
-        return CallLinkStatus();
-    
-    // To do anything meaningful, we require that the majority of calls are to something we
-    // know how to handle.
-    unsigned numCallsToKnown = edgeProfile->numCallsToKnownCells();
-    unsigned numCallsToUnknown = edgeProfile->numCallsToNotCell() + edgeProfile->numCallsToUnknownCell();
-    
-    // We require that the majority of calls were to something that we could possibly inline.
-    if (numCallsToKnown <= numCallsToUnknown)
-        return takesSlowPath();
-    
-    // We require that the number of such calls is greater than some minimal threshold, so that we
-    // avoid inlining completely cold calls.
-    if (numCallsToKnown < Options::frequentCallThreshold())
+    if (exitSiteData.m_takesSlowPath)
         return takesSlowPath();
     
-    CallLinkStatus result;
-    result.m_edges = edgeProfile->callEdges();
-    result.m_couldTakeSlowPath = !!numCallsToUnknown;
-    result.m_canTrustCounts = true;
-    
-    return result;
-}
-
-CallLinkStatus CallLinkStatus::computeFor(
-    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo,
-    ExitSiteData exitSiteData)
-{
-    CallLinkStatus result = computeFor(locker, profiledBlock, callLinkInfo);
+    CallLinkStatus result = computeFor(locker, callLinkInfo);
     if (exitSiteData.m_badFunction)
         result.makeClosureCall();
-    if (exitSiteData.m_takesSlowPath)
-        result.m_couldTakeSlowPath = true;
     
     return result;
 }
@@ -250,7 +227,7 @@ void CallLinkStatus::computeDFGStatuses(
         
         {
             ConcurrentJITLocker locker(dfgCodeBlock->m_lock);
-            map.add(info.codeOrigin, computeFor(locker, dfgCodeBlock, info, exitSiteData));
+            map.add(info.codeOrigin, computeFor(locker, info, exitSiteData));
         }
     }
 #else
@@ -279,31 +256,6 @@ CallLinkStatus CallLinkStatus::computeFor(
     return computeFor(profiledBlock, codeOrigin.bytecodeIndex, baselineMap);
 }
 
-bool CallLinkStatus::isClosureCall() const
-{
-    for (unsigned i = m_edges.size(); i--;) {
-        if (m_edges[i].callee().isClosureCall())
-            return true;
-    }
-    return false;
-}
-
-void CallLinkStatus::makeClosureCall()
-{
-    ASSERT(!m_isProved);
-    for (unsigned i = m_edges.size(); i--;)
-        m_edges[i] = m_edges[i].despecifiedClosure();
-    
-    if (!ASSERT_DISABLED) {
-        // Doing this should not have created duplicates, because the CallEdgeProfile
-        // should despecify closures if doing so would reduce the number of known callees.
-        for (unsigned i = 0; i < m_edges.size(); ++i) {
-            for (unsigned j = i + 1; j < m_edges.size(); ++j)
-                ASSERT(m_edges[i].callee() != m_edges[j].callee());
-        }
-    }
-}
-
 void CallLinkStatus::dump(PrintStream& out) const
 {
     if (!isSet()) {
@@ -319,7 +271,14 @@ void CallLinkStatus::dump(PrintStream& out) const
     if (m_couldTakeSlowPath)
         out.print(comma, "Could Take Slow Path");
     
-    out.print(listDump(m_edges));
+    if (m_callTarget)
+        out.print(comma, "Known target: ", m_callTarget);
+    
+    if (m_executable) {
+        out.print(comma, "Executable/CallHash: ", RawPointer(m_executable));
+        if (!isCompilationThread())
+            out.print("/", m_executable->hashFor(CodeForCall));
+    }
 }
 
 } // namespace JSC
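
The restored computeFor() above combines two sources of information: the CallLinkInfo's last seen callee supplies the optimistic guess, and OSR exit data can only make the result more conservative (a recorded slow-path exit forces takesSlowPath(); a recorded bad-function exit demotes an exact callee to a closure call). Below is a minimal standalone sketch of that flow; the types and names are simplified stand-ins, not JSC's CallLinkInfo/ExitSiteData/CallLinkStatus.

    // Standalone illustration only: simplified stand-ins mirroring the shape of the
    // decision in the restored CallLinkStatus::computeFor() overloads.
    #include <cstdio>
    #include <string>

    struct ProfiledCallSite {
        std::string lastSeenCallee; // empty if the call site has never linked to a callee
        bool hasSeenClosure;        // the site linked to different closures of one function
    };

    struct ExitSites {
        bool takesSlowPath; // a previous compilation exited via the generic call slow path
        bool badFunction;   // a previous compilation exited on a failed exact-callee check
    };

    struct Status {
        std::string exactCallee;  // non-empty => the exact function can be pinned
        bool closureCall = false; // only the code (executable) is known, not which closure
        bool slowPath = false;    // give up on optimizing this call site
    };

    Status computeStatus(const ProfiledCallSite& site, const ExitSites& exits)
    {
        Status status;
        if (exits.takesSlowPath) {       // exit profiling wins outright
            status.slowPath = true;
            return status;
        }
        if (site.lastSeenCallee.empty()) // never executed: leave the status unset
            return status;
        if (site.hasSeenClosure)
            status.closureCall = true;
        else
            status.exactCallee = site.lastSeenCallee;
        if (exits.badFunction && !status.exactCallee.empty()) {
            status.exactCallee.clear();  // demote the exact-callee guess to a closure call
            status.closureCall = true;
        }
        return status;
    }

    int main()
    {
        Status status = computeStatus({ "f", false }, { false, true });
        std::printf("closureCall=%d slowPath=%d\n", status.closureCall, status.slowPath);
        return 0;
    }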
index 6a3d388..9a34135 100644 (file)
@@ -46,9 +46,9 @@ struct CallLinkInfo;
 class CallLinkStatus {
 public:
     CallLinkStatus()
-        : m_couldTakeSlowPath(false)
+        : m_executable(0)
+        , m_couldTakeSlowPath(false)
         , m_isProved(false)
-        , m_canTrustCounts(false)
     {
     }
     
@@ -61,11 +61,10 @@ public:
     
     explicit CallLinkStatus(JSValue);
     
-    CallLinkStatus(CallVariant variant)
-        : m_edges(1, CallEdge(variant, 1))
+    CallLinkStatus(ExecutableBase* executable)
+        : m_executable(executable)
         , m_couldTakeSlowPath(false)
         , m_isProved(false)
-        , m_canTrustCounts(false)
     {
     }
     
@@ -93,9 +92,8 @@ public:
 #if ENABLE(JIT)
     // Computes the status assuming that we never took slow path and never previously
     // exited.
-    static CallLinkStatus computeFor(const ConcurrentJITLocker&, CodeBlock*, CallLinkInfo&);
-    static CallLinkStatus computeFor(
-        const ConcurrentJITLocker&, CodeBlock*, CallLinkInfo&, ExitSiteData);
+    static CallLinkStatus computeFor(const ConcurrentJITLocker&, CallLinkInfo&);
+    static CallLinkStatus computeFor(const ConcurrentJITLocker&, CallLinkInfo&, ExitSiteData);
 #endif
     
     typedef HashMap<CodeOrigin, CallLinkStatus, CodeOriginApproximateHash> ContextMap;
@@ -109,38 +107,37 @@ public:
     static CallLinkStatus computeFor(
         CodeBlock*, CodeOrigin, const CallLinkInfoMap&, const ContextMap&);
     
-    bool isSet() const { return !m_edges.isEmpty() || m_couldTakeSlowPath; }
+    bool isSet() const { return m_callTarget || m_executable || m_couldTakeSlowPath; }
     
     bool operator!() const { return !isSet(); }
     
     bool couldTakeSlowPath() const { return m_couldTakeSlowPath; }
+    bool isClosureCall() const { return m_executable && !m_callTarget; }
     
-    CallEdgeList edges() const { return m_edges; }
-    unsigned size() const { return m_edges.size(); }
-    CallEdge at(unsigned i) const { return m_edges[i]; }
-    CallEdge operator[](unsigned i) const { return at(i); }
+    JSValue callTarget() const { return m_callTarget; }
+    JSFunction* function() const;
+    InternalFunction* internalFunction() const;
+    Intrinsic intrinsicFor(CodeSpecializationKind) const;
+    ExecutableBase* executable() const { return m_executable; }
     bool isProved() const { return m_isProved; }
-    bool canOptimize() const { return !m_edges.isEmpty(); }
-    bool canTrustCounts() const { return m_canTrustCounts; }
-    
-    bool isClosureCall() const; // Returns true if any callee is a closure call.
+    bool canOptimize() const { return (m_callTarget || m_executable) && !m_couldTakeSlowPath; }
     
     void dump(PrintStream&) const;
     
 private:
-    void makeClosureCall();
+    void makeClosureCall()
+    {
+        ASSERT(!m_isProved);
+        // Turn this into a closure call.
+        m_callTarget = JSValue();
+    }
     
     static CallLinkStatus computeFromLLInt(const ConcurrentJITLocker&, CodeBlock*, unsigned bytecodeIndex);
-#if ENABLE(JIT)
-    static CallLinkStatus computeFromCallEdgeProfile(CallEdgeProfile*);
-    static CallLinkStatus computeFromCallLinkInfo(
-        const ConcurrentJITLocker&, CallLinkInfo&);
-#endif
     
-    CallEdgeList m_edges;
+    JSValue m_callTarget;
+    ExecutableBase* m_executable;
     bool m_couldTakeSlowPath;
     bool m_isProved;
-    bool m_canTrustCounts;
 };
 
 } // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/CallVariant.cpp b/Source/JavaScriptCore/bytecode/CallVariant.cpp
deleted file mode 100644 (file)
index 5fe0f74..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#include "config.h"
-#include "CallVariant.h"
-
-#include "JSCInlines.h"
-
-namespace JSC {
-
-void CallVariant::dump(PrintStream& out) const
-{
-    if (!*this) {
-        out.print("null");
-        return;
-    }
-    
-    if (InternalFunction* internalFunction = this->internalFunction()) {
-        out.print("InternalFunction: ", JSValue(internalFunction));
-        return;
-    }
-    
-    if (JSFunction* function = this->function()) {
-        out.print("(Function: ", JSValue(function), "; Executable: ", *executable(), ")");
-        return;
-    }
-    
-    out.print("Executable: ", *executable());
-}
-
-} // namespace JSC
-
diff --git a/Source/JavaScriptCore/bytecode/CallVariant.h b/Source/JavaScriptCore/bytecode/CallVariant.h
deleted file mode 100644 (file)
index e85983c..0000000
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef CallVariant_h
-#define CallVariant_h
-
-#include "Executable.h"
-#include "JSCell.h"
-#include "JSFunction.h"
-
-namespace JSC {
-
-// The CallVariant class is meant to encapsulate a callee in a way that is useful for call linking
-// and inlining. Because JavaScript has closures, and because JSC implements the notion of internal
-// non-function objects that nevertheless provide call traps, the call machinery wants to see a
-// callee in one of the following four forms:
-//
-// JSFunction callee: This means that we expect the callsite to always call a particular function
-//     instance, that is associated with a particular activation. This pinpoints not just the code
-//     that will be called (i.e. the executable) but also the scope within which the code runs.
-//
-// Executable callee: This corresponds to a call to a closure. In this case, we know that the
-//     callsite will call a JSFunction, but we do not know which particular JSFunction. We do know
-//     what code will be called - i.e. we know the executable.
-//
-// InternalFunction callee: JSC supports a special kind of native functions that support bizarre
-//     semantics. These are always singletons. If we know that the callee is an InternalFunction
-//     then we know both the code that will be called and the scope; in fact the "scope" is really
-//     just the InternalFunction itself.
-//
-// Something else: It's possible call all manner of rubbish in JavaScript. This implicitly supports
-//     bizarre object callees, but it can't really tell you anything interesting about them other
-//     than the fact that they don't fall into any of the above categories.
-//
-// This class serves as a kind of union over these four things. It does so by just holding a
-// JSCell*. We determine which of the modes its in by doing type checks on the cell. Note that there
-// is no lifecycle management for the cell because this class is always used in contexts where we
-// either do custom weak reference logic over instances of this class (see CallEdgeProfile), or we
-// are inside the compiler and we assume that the compiler runs in between collections and so can
-// touch the heap without notifying anyone.
-
-class CallVariant {
-public:
-    explicit CallVariant(JSCell* callee = nullptr)
-        : m_callee(callee)
-    {
-    }
-    
-    CallVariant(WTF::HashTableDeletedValueType)
-        : m_callee(deletedToken())
-    {
-    }
-    
-    bool operator!() const { return !m_callee; }
-    
-    // If this variant refers to a function, change it to refer to its executable.
-    ALWAYS_INLINE CallVariant despecifiedClosure() const
-    {
-        if (m_callee->type() == JSFunctionType)
-            return CallVariant(jsCast<JSFunction*>(m_callee)->executable());
-        return *this;
-    }
-    
-    JSCell* rawCalleeCell() const { return m_callee; }
-    
-    InternalFunction* internalFunction() const
-    {
-        return jsDynamicCast<InternalFunction*>(m_callee);
-    }
-    
-    JSFunction* function() const
-    {
-        return jsDynamicCast<JSFunction*>(m_callee);
-    }
-    
-    bool isClosureCall() const { return !!jsDynamicCast<ExecutableBase*>(m_callee); }
-    
-    ExecutableBase* executable() const
-    {
-        if (JSFunction* function = this->function())
-            return function->executable();
-        return jsDynamicCast<ExecutableBase*>(m_callee);
-    }
-    
-    JSCell* nonExecutableCallee() const
-    {
-        RELEASE_ASSERT(!isClosureCall());
-        return m_callee;
-    }
-    
-    Intrinsic intrinsicFor(CodeSpecializationKind kind) const
-    {
-        if (ExecutableBase* executable = this->executable())
-            return executable->intrinsicFor(kind);
-        return NoIntrinsic;
-    }
-    
-    FunctionExecutable* functionExecutable() const
-    {
-        if (ExecutableBase* executable = this->executable())
-            return jsDynamicCast<FunctionExecutable*>(executable);
-        return nullptr;
-    }
-    
-    void dump(PrintStream& out) const;
-    
-    bool isHashTableDeletedValue() const
-    {
-        return m_callee == deletedToken();
-    }
-    
-    bool operator==(const CallVariant& other) const
-    {
-        return m_callee == other.m_callee;
-    }
-    
-    bool operator!=(const CallVariant& other) const
-    {
-        return !(*this == other);
-    }
-    
-    bool operator<(const CallVariant& other) const
-    {
-        return m_callee < other.m_callee;
-    }
-    
-    bool operator>(const CallVariant& other) const
-    {
-        return other < *this;
-    }
-    
-    bool operator<=(const CallVariant& other) const
-    {
-        return !(*this < other);
-    }
-    
-    bool operator>=(const CallVariant& other) const
-    {
-        return other <= *this;
-    }
-    
-    unsigned hash() const
-    {
-        return WTF::PtrHash<JSCell*>::hash(m_callee);
-    }
-    
-private:
-    static JSCell* deletedToken() { return bitwise_cast<JSCell*>(static_cast<uintptr_t>(1)); }
-    
-    JSCell* m_callee;
-};
-
-struct CallVariantHash {
-    static unsigned hash(const CallVariant& key) { return key.hash(); }
-    static bool equal(const CallVariant& a, const CallVariant& b) { return a == b; }
-    static const bool safeToCompareToEmptyOrDeleted = true;
-};
-
-typedef Vector<CallVariant, 1> CallVariantList;
-
-} // namespace JSC
-
-namespace WTF {
-
-template<typename T> struct DefaultHash;
-template<> struct DefaultHash<JSC::CallVariant> {
-    typedef JSC::CallVariantHash Hash;
-};
-
-template<typename T> struct HashTraits;
-template<> struct HashTraits<JSC::CallVariant> : SimpleClassHashTraits<JSC::CallVariant> { };
-
-} // namespace WTF
-
-#endif // CallVariant_h
-
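
The deleted CallVariant header above documents the four callee shapes the call machinery distinguishes: an exact JSFunction (code plus scope), a closure call (executable only), an InternalFunction singleton, and arbitrary callable objects. The sketch below illustrates that classification and the despecifiedClosure() operation; it uses a plain tagged struct rather than the real JSCell*-based representation, so it is an analogy, not JSC code.

    // Standalone illustration of the callee shapes described in the removed comment.
    #include <cstdio>

    enum class CalleeShape {
        ExactFunction,    // a specific JSFunction: code plus its closure/scope
        Closure,          // only the executable (the code) is known
        InternalFunction, // a built-in callable singleton
        Other             // anything else that happens to be callable
    };

    struct Callee {
        CalleeShape shape;
        const char* executableName; // meaningful for ExactFunction and Closure
    };

    // Mirrors the idea behind CallVariant::despecifiedClosure(): forget which closure
    // was seen and keep only the code, which is what closure-call handling keys on.
    Callee despecifyClosure(Callee callee)
    {
        if (callee.shape == CalleeShape::ExactFunction)
            return { CalleeShape::Closure, callee.executableName };
        return callee;
    }

    int main()
    {
        Callee f = { CalleeShape::ExactFunction, "foo" };
        Callee c = despecifyClosure(f);
        std::printf("shape=%d executable=%s\n", static_cast<int>(c.shape), c.executableName);
        return 0;
    }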
index 03dd781..406d3d0 100644 (file)
@@ -154,17 +154,6 @@ struct InlineCallFrame {
         return CodeForCall;
     }
     
-    static bool isNormalCall(Kind kind)
-    {
-        switch (kind) {
-        case Call:
-        case Construct:
-            return true;
-        default:
-            return false;
-        }
-    }
-    
     Vector<ValueRecovery> arguments; // Includes 'this'.
     WriteBarrier<ScriptExecutable> executable;
     ValueRecovery calleeRecovery;
index 87ad2ed..efd7c10 100644 (file)
@@ -38,8 +38,8 @@ const char* exitKindToString(ExitKind kind)
         return "Unset";
     case BadType:
         return "BadType";
-    case BadCell:
-        return "BadCell";
+    case BadFunction:
+        return "BadFunction";
     case BadExecutable:
         return "BadExecutable";
     case BadCache:
index 150135d..5296812 100644 (file)
@@ -31,7 +31,7 @@ namespace JSC {
 enum ExitKind : uint8_t {
     ExitKindUnset,
     BadType, // We exited because a type prediction was wrong.
-    BadCell, // We exited because we made an incorrect assumption about what cell we would see. Usually used for function checks.
+    BadFunction, // We exited because we made an incorrect assumption about what function we would see.
     BadExecutable, // We exited because we made an incorrect assumption about what executable we would see.
     BadCache, // We exited because an inline cache was wrong.
     BadConstantCache, // We exited because a cache on a weak constant (usually a prototype) was wrong.
index ecc65a2..4b52637 100644 (file)
@@ -187,8 +187,7 @@ GetByIdStatus GetByIdStatus::computeForStubInfo(
                     AccessorCallJITStubRoutine* stub = static_cast<AccessorCallJITStubRoutine*>(
                         list->at(listIndex).stubRoutine());
                     callLinkStatus = std::make_unique<CallLinkStatus>(
-                        CallLinkStatus::computeFor(
-                            locker, profiledBlock, *stub->m_callLinkInfo, callExitSiteData));
+                        CallLinkStatus::computeFor(locker, *stub->m_callLinkInfo, callExitSiteData));
                     break;
                 }
                 case GetByIdAccess::CustomGetter:
index 1a420e5..939077e 100644 (file)
@@ -247,7 +247,7 @@ PutByIdStatus PutByIdStatus::computeForStubInfo(
                     std::unique_ptr<CallLinkStatus> callLinkStatus =
                         std::make_unique<CallLinkStatus>(
                             CallLinkStatus::computeFor(
-                                locker, profiledBlock, *stub->m_callLinkInfo, callExitSiteData));
+                                locker, *stub->m_callLinkInfo, callExitSiteData));
                     
                     variant = PutByIdVariant::setter(
                         structure, complexGetStatus.offset(), complexGetStatus.chain(),
index 22f8d0e..2cd8f68 100644 (file)
@@ -1459,6 +1459,14 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
         forNode(node).setType(SpecInt32);
         break;
         
+    case CheckExecutable: {
+        // FIXME: We could track executables in AbstractValue, which would allow us to get rid of these checks
+        // more thoroughly. https://bugs.webkit.org/show_bug.cgi?id=106200
+        // FIXME: We could eliminate these entirely if we know the exact value that flows into this.
+        // https://bugs.webkit.org/show_bug.cgi?id=106201
+        break;
+    }
+
     case CheckStructure: {
         // FIXME: We should be able to propagate the structure sets of constants (i.e. prototypes).
         AbstractValue& value = forNode(node->child1());
@@ -1718,29 +1726,16 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
             m_state.setIsValid(false);
         break;
     }
-        
-    case GetExecutable: {
-        JSValue value = forNode(node->child1()).value();
-        if (value) {
-            JSFunction* function = jsDynamicCast<JSFunction*>(value);
-            if (function) {
-                setConstant(node, *m_graph.freeze(function->executable()));
-                break;
-            }
-        }
-        forNode(node).setType(SpecCellOther);
-        break;
-    }
     
-    case CheckCell: {
+    case CheckFunction: {
         JSValue value = forNode(node->child1()).value();
-        if (value == node->cellOperand()->value()) {
+        if (value == node->function()->value()) {
             m_state.setFoundConstants(true);
             ASSERT(value);
             break;
         }
         
-        filterByValue(node->child1(), *node->cellOperand());
+        filterByValue(node->child1(), *node->function());
         break;
     }
         
@@ -1864,6 +1859,8 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
         
     case VariableWatchpoint:
     case VarInjectionWatchpoint:
+        break;
+            
     case PutGlobalVar:
     case NotifyWrite:
         break;
@@ -1903,16 +1900,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
         forNode(node).makeHeapTop();
         break;
 
-    case ProfiledCall:
-    case ProfiledConstruct:
-        if (forNode(m_graph.varArgChild(node, 0)).m_value)
-            m_state.setFoundConstants(true);
-        clobberWorld(node->origin.semantic, clobberLimit);
-        forNode(node).makeHeapTop();
-        break;
-
     case ForceOSRExit:
-    case CheckBadCell:
         m_state.setIsValid(false);
         break;
         
@@ -1967,8 +1955,7 @@ bool AbstractInterpreter<AbstractStateType>::executeEffects(unsigned clobberLimi
     case LastNodeType:
     case ArithIMul:
     case FiatInt52:
-    case BottomValue:
-        DFG_CRASH(m_graph, node, "Unexpected node type");
+        RELEASE_ASSERT_NOT_REACHED();
         break;
     }
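
The CheckFunction case restored earlier in this file folds the check when the abstract interpreter has already proved the incoming value to be the expected constant, and otherwise narrows (filters) the abstract value so downstream code may rely on it. A standalone sketch of that rule follows, with a one-constant lattice standing in for the DFG's AbstractValue; it is illustrative only.

    // Standalone sketch of constant-folding a cell/function check in an abstract interpreter.
    #include <cstdio>
    #include <optional>

    struct AbstractValue {
        std::optional<int> provedConstant; // set when the value is known to be a single constant
    };

    enum class CheckOutcome { Redundant, NarrowsValue };

    CheckOutcome executeCheck(AbstractValue& value, int expectedConstant)
    {
        if (value.provedConstant && *value.provedConstant == expectedConstant)
            return CheckOutcome::Redundant;      // analogous to setFoundConstants(): the check can go away
        value.provedConstant = expectedConstant; // analogous to filterByValue(): after the check, assume it
        return CheckOutcome::NarrowsValue;
    }

    int main()
    {
        AbstractValue v;
        std::printf("first run: %d\n", static_cast<int>(executeCheck(v, 42)));  // NarrowsValue
        std::printf("second run: %d\n", static_cast<int>(executeCheck(v, 42))); // Redundant
        return 0;
    }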
     
index 85859d2..e2d3a83 100644 (file)
@@ -389,11 +389,6 @@ private:
                 // then -0 and 0 are treated the same.
                 node->child1()->mergeFlags(NodeBytecodeUsesAsNumber | NodeBytecodeUsesAsOther);
                 break;
-            case SwitchCell:
-                // There is currently no point to being clever here since this is used for switching
-                // on objects.
-                mergeDefaultFlags(node);
-                break;
             }
             break;
         }
index 14a2153..733f8c2 100644 (file)
@@ -58,9 +58,7 @@ BasicBlock::BasicBlock(
 {
 }
 
-BasicBlock::~BasicBlock()
-{
-}
+BasicBlock::~BasicBlock() { }
 
 void BasicBlock::ensureLocals(unsigned newNumLocals)
 {
index dfbd880..8099407 100644 (file)
@@ -62,7 +62,6 @@ struct BasicBlock : RefCounted<BasicBlock> {
     Node*& operator[](size_t i) { return at(i); }
     Node* operator[](size_t i) const { return at(i); }
     Node* last() const { return at(size() - 1); }
-    Node* takeLast() { return m_nodes.takeLast(); }
     void resize(size_t size) { m_nodes.resize(size); }
     void grow(size_t size) { m_nodes.grow(size); }
     
@@ -107,13 +106,6 @@ struct BasicBlock : RefCounted<BasicBlock> {
     
     void dump(PrintStream& out) const;
     
-    void didLink()
-    {
-#if !ASSERT_DISABLED
-        isLinked = true;
-#endif
-    }
-    
     // This value is used internally for block linking and OSR entry. It is mostly meaningless
     // for other purposes due to inlining.
     unsigned bytecodeBegin;
index a8ba5b9..3de9e0b 100644 (file)
@@ -50,8 +50,6 @@
 
 namespace JSC { namespace DFG {
 
-static const bool verbose = false;
-
 class ConstantBufferKey {
 public:
     ConstantBufferKey()
@@ -180,20 +178,14 @@ private:
         Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
     void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
     void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
-    void emitFunctionChecks(CallVariant, Node* callTarget, int registerOffset, CodeSpecializationKind);
-    void undoFunctionChecks(CallVariant);
+    void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind);
     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
-    unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
-    bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
-    enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
-    bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance);
-    void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability);
-    void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
+    bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind);
     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
     bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
     bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType);
-    bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
+    bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
     Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
     Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
     void handleGetById(
@@ -208,9 +200,8 @@ private:
 
     Node* getScope(unsigned skipCount);
     
+    // Prepare to parse a block.
     void prepareToParseBlock();
-    void clearCaches();
-
     // Parse a single basic block of bytecode instructions.
     bool parseBlock(unsigned limit);
     // Link block successors.
@@ -305,13 +296,6 @@ private:
         
         return delayed.execute(this, setMode);
     }
-    
-    void processSetLocalQueue()
-    {
-        for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
-            m_setLocalQueue[i].execute(this);
-        m_setLocalQueue.resize(0);
-    }
 
     Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
     {
@@ -653,13 +637,6 @@ private:
         
         return result;
     }
-    
-    void removeLastNodeFromGraph(NodeType expectedNodeType)
-    {
-        Node* node = m_currentBlock->takeLast();
-        RELEASE_ASSERT(node->op() == expectedNodeType);
-        m_graph.m_allocator.free(node);
-    }
 
     void addVarArgChild(Node* child)
     {
@@ -668,7 +645,7 @@ private:
     }
     
     Node* addCallWithoutSettingResult(
-        NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
+        NodeType op, Node* callee, int argCount, int registerOffset,
         SpeculatedType prediction)
     {
         addVarArgChild(callee);
@@ -676,19 +653,19 @@ private:
         if (parameterSlots > m_parameterSlots)
             m_parameterSlots = parameterSlots;
 
-        int dummyThisArgument = op == Call || op == NativeCall || op == ProfiledCall ? 0 : 1;
+        int dummyThisArgument = op == Call || op == NativeCall ? 0 : 1;
         for (int i = 0 + dummyThisArgument; i < argCount; ++i)
             addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
 
-        return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction));
+        return addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
     }
     
     Node* addCall(
-        int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
+        int result, NodeType op, Node* callee, int argCount, int registerOffset,
         SpeculatedType prediction)
     {
         Node* call = addCallWithoutSettingResult(
-            op, opInfo, callee, argCount, registerOffset, prediction);
+            op, callee, argCount, registerOffset, prediction);
         VirtualRegister resultReg(result);
         if (resultReg.isValid())
             set(VirtualRegister(result), call);
@@ -894,7 +871,8 @@ private:
         Vector<UnlinkedBlock> m_unlinkedBlocks;
         
         // Potential block linking targets. Must be sorted by bytecodeBegin, and
-        // cannot have two blocks that have the same bytecodeBegin.
+        // cannot have two blocks that have the same bytecodeBegin. For this very
+        // reason, this is not equivalent to 
         Vector<BasicBlock*> m_blockLinkingTargets;
         
         // If the callsite's basic block was split into two, then this will be
@@ -1041,63 +1019,77 @@ void ByteCodeParser::handleCall(
     CallLinkStatus callLinkStatus, SpeculatedType prediction)
 {
     ASSERT(registerOffset <= 0);
+    CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
     
     if (callTarget->hasConstant())
         callLinkStatus = CallLinkStatus(callTarget->asJSValue()).setIsProved(true);
     
-    if ((!callLinkStatus.canOptimize() || callLinkStatus.size() != 1)
-        && !isFTL(m_graph.m_plan.mode) && Options::useFTLJIT()
-        && InlineCallFrame::isNormalCall(kind)
-        && CallEdgeLog::isEnabled()
-        && Options::dfgDoesCallEdgeProfiling()) {
-        ASSERT(op == Call || op == Construct);
-        if (op == Call)
-            op = ProfiledCall;
-        else
-            op = ProfiledConstruct;
-    }
-    
     if (!callLinkStatus.canOptimize()) {
         // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
         // that we cannot optimize them.
         
-        addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction);
+        addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction);
         return;
     }
     
     unsigned nextOffset = m_currentIndex + instructionSize;
-    
-    OpInfo callOpInfo;
-    
-    if (handleInlining(callTarget, result, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
+
+    if (InternalFunction* function = callLinkStatus.internalFunction()) {
+        if (handleConstantInternalFunction(result, function, registerOffset, argumentCountIncludingThis, prediction, specializationKind)) {
+            // This phantoming has to be *after* the code for the intrinsic, to signify that
+            // the inputs must be kept alive whatever exits the intrinsic may do.
+            addToGraph(Phantom, callTarget);
+            emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
+            return;
+        }
+        
+        // Can only handle this using the generic call handler.
+        addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction);
+        return;
+    }
+        
+    Intrinsic intrinsic = callLinkStatus.intrinsicFor(specializationKind);
+
+    JSFunction* knownFunction = nullptr;
+    if (intrinsic != NoIntrinsic) {
+        emitFunctionChecks(callLinkStatus, callTarget, registerOffset, specializationKind);
+            
+        if (handleIntrinsic(result, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
+            // This phantoming has to be *after* the code for the intrinsic, to signify that
+            // the inputs must be kept alive whatever exits the intrinsic may do.
+            addToGraph(Phantom, callTarget);
+            emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
+            if (m_graph.compilation())
+                m_graph.compilation()->noticeInlinedCall();
+            return;
+        }
+    } else if (handleInlining(callTarget, result, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) {
         if (m_graph.compilation())
             m_graph.compilation()->noticeInlinedCall();
         return;
-    }
-    
 #if ENABLE(FTL_NATIVE_CALL_INLINING)
-    if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls() && callLinkStatus.size() == 1 && !callLinkStatus.couldTakeSlowPath()) {
-        CallVariant callee = callLinkStatus[0].callee();
-        JSFunction* function = callee.function();
-        CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
+    } else if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls()) {
+        JSFunction* function = callLinkStatus.function();
         if (function && function->isHostFunction()) {
-            emitFunctionChecks(callee, callTarget, registerOffset, specializationKind);
-            callOpInfo = OpInfo(m_graph.freeze(function));
+            emitFunctionChecks(callLinkStatus, callTarget, registerOffset, specializationKind);
+            knownFunction = function;
 
-            if (op == Call || op == ProfiledCall)
+            if (op == Call)
                 op = NativeCall;
             else {
-                ASSERT(op == Construct || op == ProfiledConstruct);
+                ASSERT(op == Construct);
                 op = NativeConstruct;
             }
         }
-    }
 #endif
-    
-    addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction);
+    }
+    Node* call = addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction);
+
+    if (knownFunction) 
+        call->giveKnownFunction(knownFunction);
 }
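
The restored handleCall() above is a cascade: bail to a generic call node if profiling cannot pin the callee, then try internal-function specialization, then intrinsics (behind a callee check), then bytecode inlining, and finally a plain call node, optionally upgraded to NativeCall/NativeConstruct for a known host function. A rough standalone sketch of that ordering; the flags below are simplifications, not DFG state.

    // Standalone sketch of the call-handling cascade; illustrative flags only.
    #include <cstdio>

    struct CallSite {
        bool canOptimize;        // profiling pinned the callee well enough to act on
        bool isInternalFunction; // e.g. a built-in constructor such as Array
        bool hasIntrinsic;       // the callee maps to an intrinsic node
        bool canInlineBytecode;  // inlining the callee's bytecode would succeed
        bool isNativeHostCall;   // direct native call inlining could apply
    };

    const char* chooseStrategy(const CallSite& site)
    {
        if (!site.canOptimize)
            return "generic Call node";
        if (site.isInternalFunction)
            return "specialized internal-function handling (or fall back to a generic Call)";
        if (site.hasIntrinsic)
            return "callee check + intrinsic node";
        if (site.canInlineBytecode)
            return "inline the callee's bytecode";
        if (site.isNativeHostCall)
            return "NativeCall/NativeConstruct with a known function";
        return "plain Call/Construct node";
    }

    int main()
    {
        CallSite site = { true, false, false, true, false };
        std::printf("%s\n", chooseStrategy(site));
        return 0;
    }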
 
-void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
+void ByteCodeParser::emitFunctionChecks(const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
 {
     Node* thisArgument;
     if (kind == CodeForCall)
@@ -1105,25 +1097,20 @@ void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, in
     else
         thisArgument = 0;
 
-    JSCell* calleeCell;
-    Node* callTargetForCheck;
-    if (callee.isClosureCall()) {
-        calleeCell = callee.executable();
-        callTargetForCheck = addToGraph(GetExecutable, callTarget);
-    } else {
-        calleeCell = callee.nonExecutableCallee();
-        callTargetForCheck = callTarget;
+    if (callLinkStatus.isProved()) {
+        addToGraph(Phantom, callTarget, thisArgument);
+        return;
     }
     
-    ASSERT(calleeCell);
-    addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument);
-}
-
-void ByteCodeParser::undoFunctionChecks(CallVariant callee)
-{
-    removeLastNodeFromGraph(CheckCell);
-    if (callee.isClosureCall())
-        removeLastNodeFromGraph(GetExecutable);
+    ASSERT(callLinkStatus.canOptimize());
+    
+    if (JSFunction* function = callLinkStatus.function())
+        addToGraph(CheckFunction, OpInfo(m_graph.freeze(function)), callTarget, thisArgument);
+    else {
+        ASSERT(callLinkStatus.executable());
+        
+        addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument);
+    }
 }
 
 void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
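
emitFunctionChecks(), restored above, picks the cheapest guard that still justifies the optimization: only a Phantom when the status is proved, an exact-cell CheckFunction when the JSFunction is known, and a CheckExecutable on the callee's executable for closure calls. The sketch below illustrates that choice with placeholder types and strings standing in for DFG nodes; it assumes, as the real code asserts, that the status can be optimized at all.

    // Standalone sketch of choosing a callee guard; not DFG node construction.
    #include <cstdio>
    #include <string>

    struct Status {
        bool proved = false;    // statically guaranteed callee, e.g. a frozen constant
        std::string function;   // non-empty => exact JSFunction known
        std::string executable; // non-empty => at least the code being called is known
    };

    std::string planCalleeGuard(const Status& status)
    {
        if (status.proved)
            return "Phantom(callee)"; // keep the callee alive, but no check is needed
        if (!status.function.empty())
            return "CheckFunction(callee == " + status.function + ")";
        // Assumes canOptimize(): if no exact function is known, the executable must be.
        return "CheckExecutable(callee->executable == " + status.executable + ")";
    }

    int main()
    {
        Status closureOnly;
        closureOnly.executable = "bar@foo.js";
        std::printf("%s\n", planCalleeGuard(closureOnly).c_str());
        return 0;
    }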
@@ -1132,17 +1119,28 @@ void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountI
         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
 }
 
-unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind)
+bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind)
 {
+    static const bool verbose = false;
+    
+    CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
+    
     if (verbose)
-        dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
+        dataLog("Considering inlining ", callLinkStatus, " into ", currentCodeOrigin(), "\n");
     
-    FunctionExecutable* executable = callee.functionExecutable();
-    if (!executable) {
+    // First, the really simple checks: do we have an actual JS function?
+    if (!callLinkStatus.executable()) {
         if (verbose)
-            dataLog("    Failing because there is no function executable.");
-        return UINT_MAX;
+            dataLog("    Failing because there is no executable.\n");
+        return false;
     }
+    if (callLinkStatus.executable()->isHostFunction()) {
+        if (verbose)
+            dataLog("    Failing because it's a host function.\n");
+        return false;
+    }
+    
+    FunctionExecutable* executable = jsCast<FunctionExecutable*>(callLinkStatus.executable());
     
     // Does the number of arguments we're passing match the arity of the target? We currently
     // inline only if the number of arguments passed is greater than or equal to the number
@@ -1150,7 +1148,7 @@ unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountInclu
     if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
         if (verbose)
             dataLog("    Failing because of arity mismatch.\n");
-        return UINT_MAX;
+        return false;
     }
     
     // Do we have a code block, and does the code block's size match the heuristics/requirements for
@@ -1159,18 +1157,18 @@ unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountInclu
     // if we had a static proof of what was being called; this might happen for example if you call a
     // global function, where watchpointing gives us static information. Overall, it's a rare case
     // because we expect that any hot callees would have already been compiled.
-    CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
+    CodeBlock* codeBlock = executable->baselineCodeBlockFor(specializationKind);
     if (!codeBlock) {
         if (verbose)
             dataLog("    Failing because no code block available.\n");
-        return UINT_MAX;
+        return false;
     }
     CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
-        codeBlock, kind, callee.isClosureCall());
+        codeBlock, specializationKind, callLinkStatus.isClosureCall());
     if (!canInline(capabilityLevel)) {
         if (verbose)
             dataLog("    Failing because the function is not inlineable.\n");
-        return UINT_MAX;
+        return false;
     }
     
     // Check if the caller is already too large. We do this check here because that's just
@@ -1180,7 +1178,7 @@ unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountInclu
         codeBlock->m_shouldAlwaysBeInlined = false;
         if (verbose)
             dataLog("    Failing because the caller is too large.\n");
-        return UINT_MAX;
+        return false;
     }
     
     // FIXME: this should be better at predicting how much bloat we will introduce by inlining
@@ -1199,7 +1197,7 @@ unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountInclu
         if (depth >= Options::maximumInliningDepth()) {
             if (verbose)
                 dataLog("    Failing because depth exceeded.\n");
-            return UINT_MAX;
+            return false;
         }
         
         if (entry->executable() == executable) {
@@ -1207,26 +1205,19 @@ unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountInclu
             if (recursion >= Options::maximumInliningRecursion()) {
                 if (verbose)
                     dataLog("    Failing because recursion detected.\n");
-                return UINT_MAX;
+                return false;
             }
         }
     }
     
     if (verbose)
-        dataLog("    Inlining should be possible.\n");
+        dataLog("    Committing to inlining.\n");
     
-    // It might be possible to inline.
-    return codeBlock->instructionCount();
-}
-
-void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability)
-{
-    CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
-    
-    ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX);
+    // Now we know without a doubt that we are committed to inlining. So begin the process
+    // by checking the callee (if necessary) and making sure that arguments and the callee
+    // are flushed.
+    emitFunctionChecks(callLinkStatus, callTargetNode, registerOffset, specializationKind);
     
-    CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
-
     // FIXME: Don't flush constants!
     
     int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
@@ -1242,7 +1233,7 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
         resultReg = m_inlineStackTop->remapOperand(resultReg);
     
     InlineStackEntry inlineStackEntry(
-        this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
+        this, codeBlock, codeBlock, m_graph.lastBlock(), callLinkStatus.function(), resultReg,
         (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
     
     // This is where the actual inlining really happens.
@@ -1256,8 +1247,8 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
     
     RELEASE_ASSERT(
         m_inlineStackTop->m_inlineCallFrame->isClosureCall
-        == callee.isClosureCall());
-    if (callee.isClosureCall()) {
+        == callLinkStatus.isClosureCall());
+    if (callLinkStatus.isClosureCall()) {
         VariableAccessData* calleeVariable =
             set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
         VariableAccessData* scopeVariable =
@@ -1272,7 +1263,7 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
     m_graph.m_inlineVariableData.append(inlineVariableData);
     
     parseCodeBlock();
-    clearCaches(); // Reset our state now that we're back to the outer code.
+    prepareToParseBlock(); // Reset our state now that we're back to the outer code.
     
     m_currentIndex = oldIndex;
     
@@ -1285,8 +1276,20 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
         else
             ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
         
-        if (callerLinkability == CallerDoesNormalLinking)
-            cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);
+        // It's possible that the callsite block head is not owned by the caller.
+        if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
+            // It's definitely owned by the caller, because the caller created new blocks.
+            // Assert that this all adds up.
+            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_block == inlineStackEntry.m_callsiteBlockHead);
+            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
+            inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
+        } else {
+            // It's definitely not owned by the caller. Tell the caller that he does not
+            // need to link his callsite block head, because we did it for him.
+            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
+            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
+            inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
+        }
         
         linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
     } else
@@ -1305,19 +1308,16 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
             // for release builds because this block will never serve as a potential target
             // in the linker's binary search.
             lastBlock->bytecodeBegin = m_currentIndex;
-            if (callerLinkability == CallerDoesNormalLinking) {
-                if (verbose)
-                    dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
-                m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
-            }
+            m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
         }
         
         m_currentBlock = m_graph.lastBlock();
-        return;
+        return true;
     }
     
     // If we get to this point then all blocks must end in some sort of terminals.
     ASSERT(lastBlock->last()->isTerminal());
+    
 
     // Need to create a new basic block for the continuation at the caller.
     RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
@@ -1333,293 +1333,20 @@ void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVar
         ASSERT(!node->targetBlock());
         node->targetBlock() = block.get();
         inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
-        if (verbose)
-            dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
-        blockToLink->didLink();
+#if !ASSERT_DISABLED
+        blockToLink->isLinked = true;
+#endif
     }
     
     m_currentBlock = block.get();
     ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
-    if (verbose)
-        dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n");
-    if (callerLinkability == CallerDoesNormalLinking) {
-        m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
-        m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
-    }
+    m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
+    m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
     m_graph.appendBlock(block);
     prepareToParseBlock();
-}
-
-void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
-{
-    // It's possible that the callsite block head is not owned by the caller.
-    if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
-        // It's definitely owned by the caller, because the caller created new blocks.
-        // Assert that this all adds up.
-        ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
-        ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
-        inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
-    } else {
-        // It's definitely not owned by the caller. Tell the caller that he does not
-        // need to link his callsite block head, because we did it for him.
-        ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
-        ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
-        inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
-    }
-}
-
-bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance)
-{
-    CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
-    
-    if (!inliningBalance)
-        return false;
-    
-    if (InternalFunction* function = callee.internalFunction()) {
-        if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind)) {
-            addToGraph(Phantom, callTargetNode);
-            emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
-            inliningBalance--;
-            return true;
-        }
-        return false;
-    }
-    
-    Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
-    if (intrinsic != NoIntrinsic) {
-        if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
-            addToGraph(Phantom, callTargetNode);
-            emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
-            inliningBalance--;
-            return true;
-        }
-        return false;
-    }
-    
-    unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind);
-    if (myInliningCost > inliningBalance)
-        return false;
-    
-    inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability);
-    inliningBalance -= myInliningCost;
-    return true;
-}
-
-bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
-{
-    if (verbose) {
-        dataLog("Handling inlining...\n");
-        dataLog("Stack: ", currentCodeOrigin(), "\n");
-    }
-    CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
-    
-    if (!callLinkStatus.size()) {
-        if (verbose)
-            dataLog("Bailing inlining.\n");
-        return false;
-    }
-    
-    unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount();
-    if (specializationKind == CodeForConstruct)
-        inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
-    if (callLinkStatus.isClosureCall())
-        inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
-    
-    // First check if we can avoid creating control flow. Our inliner does some CFG
-    // simplification on the fly and this helps reduce compile times, but we can only leverage
-    // this in cases where we don't need control flow diamonds to check the callee.
-    if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
-        emitFunctionChecks(
-            callLinkStatus[0].callee(), callTargetNode, registerOffset, specializationKind);
-        bool result = attemptToInlineCall(
-            callTargetNode, resultOperand, callLinkStatus[0].callee(), registerOffset,
-            argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
-            inliningBalance);
-        if (!result && !callLinkStatus.isProved())
-            undoFunctionChecks(callLinkStatus[0].callee());
-        if (verbose) {
-            dataLog("Done inlining (simple).\n");
-            dataLog("Stack: ", currentCodeOrigin(), "\n");
-        }
-        return result;
-    }
-    
-    // We need to create some kind of switch over callee. For now we only do this if we believe that
-    // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
-    // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
-    // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
-    // we could improve that aspect of this by doing polymorphic inlining but having the profiling
-    // also. Currently we opt against this, but it could be interesting. That would require having a
-    // separate node for call edge profiling.
-    // FIXME: Introduce the notion of a separate call edge profiling node.
-    // https://bugs.webkit.org/show_bug.cgi?id=136033
-    if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicCallInlining()) {
-        if (verbose) {
-            dataLog("Bailing inlining (hard).\n");
-            dataLog("Stack: ", currentCodeOrigin(), "\n");
-        }
-        return false;
-    }
-    
-    unsigned oldOffset = m_currentIndex;
-    
-    bool allAreClosureCalls = true;
-    bool allAreDirectCalls = true;
-    for (unsigned i = callLinkStatus.size(); i--;) {
-        if (callLinkStatus[i].callee().isClosureCall())
-            allAreDirectCalls = false;
-        else
-            allAreClosureCalls = false;
-    }
-    
-    Node* thingToSwitchOn;
-    if (allAreDirectCalls)
-        thingToSwitchOn = callTargetNode;
-    else if (allAreClosureCalls)
-        thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
-    else {
-        // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
-        // where it would be beneficial. Also, CallLinkStatus would make all callees appear like
-        // closure calls if any calls were closure calls - except for calls to internal functions.
-        // So this will only arise if some callees are internal functions and others are closures.
-        // https://bugs.webkit.org/show_bug.cgi?id=136020
-        if (verbose) {
-            dataLog("Bailing inlining (mix).\n");
-            dataLog("Stack: ", currentCodeOrigin(), "\n");
-        }
-        return false;
-    }
-    
-    if (verbose) {
-        dataLog("Doing hard inlining...\n");
-        dataLog("Stack: ", currentCodeOrigin(), "\n");
-    }
-    
-    // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
-    // store the callee so that it will be accessible to all of the blocks we're about to create. We
-    // get away with doing an immediate-set here because we wouldn't have performed any side effects
-    // yet.
-    if (verbose)
-        dataLog("Register offset: ", registerOffset);
-    VirtualRegister calleeReg(registerOffset + JSStack::Callee);
-    calleeReg = m_inlineStackTop->remapOperand(calleeReg);
-    if (verbose)
-        dataLog("Callee is going to be ", calleeReg, "\n");
-    setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);
-    
-    SwitchData& data = *m_graph.m_switchData.add();
-    data.kind = SwitchCell;
-    addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
-    
-    BasicBlock* originBlock = m_currentBlock;
-    if (verbose)
-        dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
-    originBlock->didLink();
-    cancelLinkingForBlock(m_inlineStackTop, originBlock);
-    
-    // Each inlined callee will have a landing block that it returns at. They should all have jumps
-    // to the continuation block, which we create last.
-    Vector<BasicBlock*> landingBlocks;
-    
-    // We make force this true if we give up on inlining any of the edges.
-    bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();
-    
-    if (verbose)
-        dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");
-    
-    for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
-        m_currentIndex = oldOffset;
-        RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
-        m_currentBlock = block.get();
-        m_graph.appendBlock(block);
-        prepareToParseBlock();
-        
-        Node* myCallTargetNode = getDirect(calleeReg);
-        
-        bool inliningResult = attemptToInlineCall(
-            myCallTargetNode, resultOperand, callLinkStatus[i].callee(), registerOffset,
-            argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
-            inliningBalance);
-        
-        if (!inliningResult) {
-            // That failed so we let the block die. Nothing interesting should have been added to
-            // the block. We also give up on inlining any of the (less frequent) callees.
-            ASSERT(m_currentBlock == block.get());
-            ASSERT(m_graph.m_blocks.last() == block);
-            m_graph.killBlockAndItsContents(block.get());
-            m_graph.m_blocks.removeLast();
-            
-            // The fact that inlining failed means we need a slow path.
-            couldTakeSlowPath = true;
-            break;
-        }
-        
-        JSCell* thingToCaseOn;
-        if (allAreDirectCalls)
-            thingToCaseOn = callLinkStatus[i].callee().nonExecutableCallee();
-        else {
-            ASSERT(allAreClosureCalls);
-            thingToCaseOn = callLinkStatus[i].callee().executable();
-        }
-        data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get()));
-        m_currentIndex = nextOffset;
-        processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
-        addToGraph(Jump);
-        if (verbose)
-            dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
-        m_currentBlock->didLink();
-        landingBlocks.append(m_currentBlock);
-
-        if (verbose)
-            dataLog("Finished inlining ", callLinkStatus[i].callee(), " at ", currentCodeOrigin(), ".\n");
-    }
-    
-    RefPtr<BasicBlock> slowPathBlock = adoptRef(
-        new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
-    m_currentIndex = oldOffset;
-    data.fallThrough = BranchTarget(slowPathBlock.get());
-    m_graph.appendBlock(slowPathBlock);
-    if (verbose)
-        dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n");
-    slowPathBlock->didLink();
-    prepareToParseBlock();
-    m_currentBlock = slowPathBlock.get();
-    Node* myCallTargetNode = getDirect(calleeReg);
-    if (couldTakeSlowPath) {
-        addCall(
-            resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis,
-            registerOffset, prediction);
-    } else {
-        addToGraph(CheckBadCell);
-        addToGraph(Phantom, myCallTargetNode);
-        emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind);
-        
-        set(VirtualRegister(resultOperand), addToGraph(BottomValue));
-    }
-
-    m_currentIndex = nextOffset;
-    processSetLocalQueue();
-    addToGraph(Jump);
-    landingBlocks.append(m_currentBlock);
-    
-    RefPtr<BasicBlock> continuationBlock = adoptRef(
-        new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
-    m_graph.appendBlock(continuationBlock);
-    if (verbose)
-        dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n");
-    m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get()));
-    prepareToParseBlock();
-    m_currentBlock = continuationBlock.get();
     
-    for (unsigned i = landingBlocks.size(); i--;)
-        landingBlocks[i]->last()->targetBlock() = continuationBlock.get();
-    
-    m_currentIndex = oldOffset;
-    
-    if (verbose) {
-        dataLog("Done inlining (hard).\n");
-        dataLog("Stack: ", currentCodeOrigin(), "\n");
-    }
+    // At this point we return and continue to generate code for the caller, but
+    // in the new basic block.
     return true;
 }
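
The parser logic removed above amounts to: switch on the callee's identity, run a specialized inlined body for each profiled target, and fall through to a generic slow-path call when profiling says the site could take a slow case or when inlining one of the candidates fails. A minimal standalone sketch of that dispatch shape, with illustrative names rather than WebKit API:

    // Illustrative names only; this is the dispatch shape, not WebKit API.
    #include <cstdio>

    using Callee = int (*)(int);

    static int addOne(int x) { return x + 1; }
    static int timesTwo(int x) { return x * 2; }
    static int minusOne(int x) { return x - 1; }

    // Generic call: the analogue of the slow-path Call node taken when the callee
    // matches none of the profiled cases (couldTakeSlowPath).
    static int slowPathCall(Callee callee, int argument) { return callee(argument); }

    static int polymorphicCall(Callee callee, int argument)
    {
        // Each comparison plays the role of one SwitchCell case on a frozen callee;
        // the expression that follows is the inlined copy of that callee's body.
        if (callee == addOne)
            return argument + 1;
        if (callee == timesTwo)
            return argument * 2;
        return slowPathCall(callee, argument);
    }

    int main()
    {
        std::printf("%d\n", polymorphicCall(addOne, 41));   // inlined fast case
        std::printf("%d\n", polymorphicCall(timesTwo, 21)); // inlined fast case
        std::printf("%d\n", polymorphicCall(minusOne, 43)); // unprofiled callee: slow path
        return 0;
    }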
 
@@ -1918,7 +1645,7 @@ bool ByteCodeParser::handleTypedArrayConstructor(
 
 bool ByteCodeParser::handleConstantInternalFunction(
     int resultOperand, InternalFunction* function, int registerOffset,
-    int argumentCountIncludingThis, CodeSpecializationKind kind)
+    int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
 {
     // If we ever find that we have a lot of internal functions that we specialize for,
     // then we should probably have some sort of hashtable dispatch, or maybe even
@@ -1927,6 +1654,8 @@ bool ByteCodeParser::handleConstantInternalFunction(
     // we know about is small enough, that having just a linear cascade of if statements
     // is good enough.
     
+    UNUSED_PARAM(prediction); // Remove this once we do more things.
+    
     if (function->classInfo() == ArrayConstructor::info()) {
         if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
             return false;
@@ -2291,12 +2020,6 @@ void ByteCodeParser::handlePutById(
 
 void ByteCodeParser::prepareToParseBlock()
 {
-    clearCaches();
-    ASSERT(m_setLocalQueue.isEmpty());
-}
-
-void ByteCodeParser::clearCaches()
-{
     m_constants.resize(0);
 }
 
@@ -2336,7 +2059,9 @@ bool ByteCodeParser::parseBlock(unsigned limit)
     }
 
     while (true) {
-        processSetLocalQueue();
+        for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
+            m_setLocalQueue[i].execute(this);
+        m_setLocalQueue.resize(0);
         
         // Don't extend over jump destinations.
         if (m_currentIndex == limit) {
@@ -2480,13 +2205,13 @@ bool ByteCodeParser::parseBlock(unsigned limit)
             JSCell* cachedFunction = currentInstruction[2].u.jsCell.get();
             if (!cachedFunction 
                 || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
-                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
+                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction)) {
                 set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee)));
             } else {
                 FrozenValue* frozen = m_graph.freeze(cachedFunction);
                 ASSERT(cachedFunction->inherits(JSFunction::info()));
                 Node* actualCallee = get(VirtualRegister(JSStack::Callee));
-                addToGraph(CheckCell, OpInfo(frozen), actualCallee);
+                addToGraph(CheckFunction, OpInfo(frozen), actualCallee);
                 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen)));
             }
             NEXT_OPCODE(op_get_callee);
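
The op_get_callee hunk above keys off a profiled cached function: if profiling never saw a slow case or a BadFunction exit, the parser emits a guard against the frozen function and then treats the callee as a compile-time constant; otherwise it falls back to loading the callee from the stack. A standalone sketch of that guard-then-fold decision, with invented names:

    // Invented names; only the decision structure mirrors the hunk above.
    #include <cstdio>

    using Callee = int (*)(int);

    static int triple(int x) { return 3 * x; }

    struct CalleeProfile {
        Callee cachedCallee { nullptr }; // analogue of the cached JSFunction cell
        bool tookSlowCase { false };     // analogue of couldTakeSlowCase / exit sites
    };

    // Returns the constant-folded callee if profiling supports it, and reports via
    // needsGuard that a runtime check against that constant must still be emitted.
    static Callee foldCalleeIfProfiled(const CalleeProfile& profile, bool& needsGuard)
    {
        if (profile.cachedCallee && !profile.tookSlowCase) {
            needsGuard = true;           // analogue of addToGraph(CheckFunction, ...)
            return profile.cachedCallee; // analogue of the frozen JSConstant
        }
        needsGuard = false;              // caller must load the callee dynamically
        return nullptr;
    }

    int main()
    {
        CalleeProfile profile { triple, false };
        bool needsGuard = false;
        Callee folded = foldCalleeIfProfiled(profile, needsGuard);
        std::printf("folded=%d guard=%d result=%d\n",
            folded != nullptr, needsGuard, folded ? folded(14) : -1);
        return 0;
    }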
@@ -3168,7 +2893,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
             // already gnarly enough as it is.
             ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
             addToGraph(
-                CheckCell,
+                CheckFunction,
                 OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor(
                     m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))),
                 get(VirtualRegister(currentInstruction[1].u.operand)));
@@ -3592,19 +3317,15 @@ void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleT
         break;
     }
     
-    if (verbose)
-        dataLog("Marking ", RawPointer(block), " as linked (actually did linking)\n");
-    block->didLink();
+#if !ASSERT_DISABLED
+    block->isLinked = true;
+#endif
 }
 
 void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets)
 {
     for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
-        if (verbose)
-            dataLog("Attempting to link ", RawPointer(unlinkedBlocks[i].m_block), "\n");
         if (unlinkedBlocks[i].m_needsNormalLinking) {
-            if (verbose)
-                dataLog("    Does need normal linking.\n");
             linkBlock(unlinkedBlocks[i].m_block, possibleTargets);
             unlinkedBlocks[i].m_needsNormalLinking = false;
         }
@@ -3771,7 +3492,7 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
 
 void ByteCodeParser::parseCodeBlock()
 {
-    clearCaches();
+    prepareToParseBlock();
     
     CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
     
@@ -3837,12 +3558,7 @@ void ByteCodeParser::parseCodeBlock()
                     // 2) If the bytecodeBegin is equal to the currentIndex, then we failed to do
                     //    a peephole coalescing of this block in the if statement above. So, we're
                     //    generating suboptimal code and leaving more work for the CFG simplifier.
-                    if (!m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
-                        unsigned lastBegin =
-                            m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin;
-                        ASSERT_UNUSED(
-                            lastBegin, lastBegin == UINT_MAX || lastBegin < m_currentIndex);
-                    }
+                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin < m_currentIndex);
                     m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
                     m_inlineStackTop->m_blockLinkingTargets.append(block.get());
                     // The first block is definitely an OSR target.
index b7ef0d0..80d49c1 100644 (file)
@@ -90,10 +90,8 @@ private:
                     node->children.setChild1(Edge());
                     break;
                 case Phantom:
-                    if (!node->child1()) {
-                        m_graph.m_allocator.free(node);
+                    if (!node->child1())
                         continue;
-                    }
                     switch (node->child1()->op()) {
                     case Phi:
                     case SetArgument:
index 9a37e73..fb723d3 100644 (file)
@@ -144,8 +144,6 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write
     case FiatInt52:
     case MakeRope:
     case ValueToInt32:
-    case GetExecutable:
-    case BottomValue:
         def(PureValue(node));
         return;
         
@@ -241,8 +239,12 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write
         def(PureValue(node, node->arithMode()));
         return;
         
-    case CheckCell:
-        def(PureValue(CheckCell, AdjacencyList(AdjacencyList::Fixed, node->child1()), node->cellOperand()));
+    case CheckFunction:
+        def(PureValue(CheckFunction, AdjacencyList(AdjacencyList::Fixed, node->child1()), node->function()));
+        return;
+        
+    case CheckExecutable:
+        def(PureValue(node, node->executable()));
         return;
         
     case ConstantStoragePointer:
@@ -261,7 +263,6 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write
     case Switch:
     case Throw:
     case ForceOSRExit:
-    case CheckBadCell:
     case Return:
     case Unreachable:
     case CheckTierUpInLoop:
@@ -357,8 +358,6 @@ void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write
     case ArrayPop:
     case Call:
     case Construct:
-    case ProfiledCall:
-    case ProfiledConstruct:
     case NativeCall:
     case NativeConstruct:
     case ToPrimitive:
index 68e7a41..417d7ab 100644 (file)
@@ -63,13 +63,6 @@ enum RefNodeMode {
     DontRefNode
 };
 
-enum SwitchKind {
-    SwitchImm,
-    SwitchChar,
-    SwitchString,
-    SwitchCell
-};
-
 inline bool verboseCompilationEnabled(CompilationMode mode = DFGMode)
 {
     return Options::verboseCompilation() || Options::dumpGraphAtEachPhase() || (isFTL(mode) && Options::verboseFTLCompilation());
index a776162..ddbeca0 100644 (file)
@@ -142,8 +142,8 @@ private:
                 break;
             }
                 
-            case CheckCell: {
-                if (m_state.forNode(node->child1()).value() != node->cellOperand()->value())
+            case CheckFunction: {
+                if (m_state.forNode(node->child1()).value() != node->function()->value())
                     break;
                 node->convertToPhantom();
                 eliminated = true;
@@ -384,19 +384,6 @@ private:
                 }
                 break;
             }
-                
-            case ProfiledCall:
-            case ProfiledConstruct: {
-                if (!m_state.forNode(m_graph.varArgChild(node, 0)).m_value)
-                    break;
-                
-                // If we were able to prove that the callee is a constant then the normal call
-                // inline cache will record this callee. This means that there is no need to do any
-                // additional profiling.
-                node->setOp(node->op() == ProfiledCall ? Call : Construct);
-                eliminated = true;
-                break;
-            }
 
             default:
                 break;
index 9d6d4e0..3114f6a 100644 (file)
@@ -91,7 +91,7 @@ bool doesGC(Graph& graph, Node* node)
     case PutByIdFlush:
     case PutByIdDirect:
     case CheckStructure:
-    case GetExecutable:
+    case CheckExecutable:
     case GetButterfly:
     case CheckArray:
     case GetScope:
@@ -104,7 +104,7 @@ bool doesGC(Graph& graph, Node* node)
     case PutGlobalVar:
     case VariableWatchpoint:
     case VarInjectionWatchpoint:
-    case CheckCell:
+    case CheckFunction:
     case AllocationProfileWatchpoint:
     case RegExpExec:
     case RegExpTest:
@@ -119,8 +119,6 @@ bool doesGC(Graph& graph, Node* node)
     case Construct:
     case NativeCall:
     case NativeConstruct:
-    case ProfiledCall:
-    case ProfiledConstruct:
     case Breakpoint:
     case ProfileWillCall:
     case ProfileDidCall:
@@ -197,8 +195,6 @@ bool doesGC(Graph& graph, Node* node)
     case GetDirectPname:
     case FiatInt52:
     case BooleanToNumber:
-    case CheckBadCell:
-    case BottomValue:
         return false;
 
     case CreateActivation:
index 2601735..30b4f38 100644 (file)
@@ -89,9 +89,6 @@ static CompilationResult compileImpl(
         vm.getCTIStub(virtualConstructThatPreservesRegsThunkGenerator);
     }
     
-    if (CallEdgeLog::isEnabled())
-        vm.ensureCallEdgeLog().processLog();
-    
     RefPtr<Plan> plan = adoptRef(
         new Plan(codeBlock, profiledDFGCodeBlock, mode, osrEntryBytecodeIndex, mustHandleValues));
     
index 78b08d7..6bb35e6 100644 (file)
@@ -736,12 +736,6 @@ private:
                 else if (node->child1()->shouldSpeculateString())
                     fixEdge<StringUse>(node->child1());
                 break;
-            case SwitchCell:
-                if (node->child1()->shouldSpeculateCell())
-                    fixEdge<CellUse>(node->child1());
-                // else it's fine for this to have UntypedUse; we will handle this by just making
-                // non-cells take the default case.
-                break;
             }
             break;
         }
@@ -903,13 +897,13 @@ private:
             break;
         }
 
-        case GetExecutable: {
+        case CheckExecutable: {
             fixEdge<FunctionUse>(node->child1());
             break;
         }
             
         case CheckStructure:
-        case CheckCell:
+        case CheckFunction:
         case CheckHasInstance:
         case CreateThis:
         case GetButterfly: {
@@ -1126,8 +1120,6 @@ private:
         case AllocationProfileWatchpoint:
         case Call:
         case Construct:
-        case ProfiledCall:
-        case ProfiledConstruct:
         case NativeCall:
         case NativeConstruct:
         case NewObject:
@@ -1157,7 +1149,6 @@ private:
         case ThrowReferenceError:
         case CountExecution:
         case ForceOSRExit:
-        case CheckBadCell:
         case CheckWatchdogTimer:
         case Unreachable:
         case ExtractOSREntryLocal:
@@ -1168,7 +1159,6 @@ private:
         case TypedArrayWatchpoint:
         case MovHint:
         case ZombieHint:
-        case BottomValue:
             break;
 #else
         default:
index d9a5f4b..1fedfbe 100644 (file)
@@ -222,23 +222,24 @@ void Graph::dump(PrintStream& out, const char* prefix, Node* node, DumpContext*
         out.print(comma, inContext(*node->structure(), context));
     if (node->hasTransition())
         out.print(comma, pointerDumpInContext(node->transition(), context));
-    if (node->hasCellOperand()) {
-        if (!node->cellOperand()->value() || !node->cellOperand()->value().isCell())
-            out.print(comma, "invalid cell operand: ", node->cellOperand()->value());
-        else {
-            out.print(comma, pointerDump(node->cellOperand()->value().asCell()));
-            if (node->cellOperand()->value().isCell()) {
-                CallVariant variant(node->cellOperand()->value().asCell());
-                if (ExecutableBase* executable = variant.executable()) {
-                    if (executable->isHostFunction())
-                        out.print(comma, "<host function>");
-                    else if (FunctionExecutable* functionExecutable = jsDynamicCast<FunctionExecutable*>(executable))
-                        out.print(comma, FunctionExecutableDump(functionExecutable));
-                    else
-                        out.print(comma, "<non-function executable>");
-                }
-            }
-        }
+    if (node->hasFunction()) {
+        out.print(comma, "function(", pointerDump(node->function()), ", ");
+        if (node->function()->value().isCell()
+            && node->function()->value().asCell()->inherits(JSFunction::info())) {
+            JSFunction* function = jsCast<JSFunction*>(node->function()->value().asCell());
+            if (function->isHostFunction())
+                out.print("<host function>");
+            else
+                out.print(FunctionExecutableDump(function->jsExecutable()));
+        } else
+            out.print("<not JSFunction>");
+        out.print(")");
+    }
+    if (node->hasExecutable()) {
+        if (node->executable()->inherits(FunctionExecutable::info()))
+            out.print(comma, "executable(", FunctionExecutableDump(jsCast<FunctionExecutable*>(node->executable())), ")");
+        else
+            out.print(comma, "executable(not function: ", RawPointer(node->executable()), ")");
     }
     if (node->hasFunctionDeclIndex()) {
         FunctionExecutable* executable = m_codeBlock->functionDecl(node->functionDeclIndex());
@@ -984,6 +985,10 @@ void Graph::visitChildren(SlotVisitor& visitor)
             Node* node = block->at(nodeIndex);
             
             switch (node->op()) {
+            case CheckExecutable:
+                visitor.appendUnbarrieredReadOnlyPointer(node->executable());
+                break;
+                
             case CheckStructure:
                 for (unsigned i = node->structureSet().size(); i--;)
                     visitor.appendUnbarrieredReadOnlyPointer(node->structureSet()[i]);
index cd27c7f..189ce48 100644 (file)
@@ -188,7 +188,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
             table.ctiOffsets[j] = table.ctiDefault;
         for (unsigned j = data.cases.size(); j--;) {
             SwitchCase& myCase = data.cases[j];
-            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
+            table.ctiOffsets[myCase.value.switchLookupValue() - table.min] =
                 linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
         }
     }
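
The linker hunk above fills a dense jump table: every slot starts at the default target (ctiDefault) and each switch case then claims the slot at index value - min. The same construction in a self-contained sketch, with plain ints standing in for code locations:

    // Plain ints stand in for jump targets and code locations.
    #include <cstdio>
    #include <utility>
    #include <vector>

    int main()
    {
        const int minValue = 10;                       // analogue of table.min
        const int defaultTarget = -1;                  // analogue of table.ctiDefault
        const std::vector<std::pair<int, int>> cases = // {switch value, target}
            { { 10, 100 }, { 12, 120 }, { 14, 140 } };

        // Every slot starts at the default target...
        std::vector<int> table(5, defaultTarget);      // covers values 10..14

        // ...and each case then claims its slot at index (value - min).
        for (const auto& [value, target] : cases)
            table[value - minValue] = target;

        for (int value = minValue; value < minValue + static_cast<int>(table.size()); ++value)
            std::printf("value %d -> target %d\n", value, table[value - minValue]);
        return 0;
    }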
index 6011490..244c7ed 100644 (file)
@@ -113,36 +113,6 @@ TriState LazyJSValue::strictEqual(const LazyJSValue& other) const
     return FalseTriState;
 }
 
-uintptr_t LazyJSValue::switchLookupValue(SwitchKind kind) const
-{
-    // NB. Not every kind of JSValue will be able to give you a switch lookup
-    // value, and this method will assert, or do bad things, if you use it
-    // for a kind of value that can't.
-    switch (m_kind) {
-    case KnownValue:
-        switch (kind) {
-        case SwitchImm:
-            return value()->value().asInt32();
-        case SwitchCell:
-            return bitwise_cast<uintptr_t>(value()->value().asCell());
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-            return 0;
-        }
-    case SingleCharacterString:
-        switch (kind) {
-        case SwitchChar:
-            return character();
-        default:
-            RELEASE_ASSERT_NOT_REACHED();
-            return 0;
-        }
-    default:
-        RELEASE_ASSERT_NOT_REACHED();
-        return 0;
-    }
-}
-
 void LazyJSValue::dumpInContext(PrintStream& out, DumpContext* context) const
 {
     switch (m_kind) {
index a1231db..0b8187b 100644 (file)
@@ -28,7 +28,6 @@
 
 #if ENABLE(DFG_JIT)
 
-#include "DFGCommon.h"
 #include "DFGFrozenValue.h"
 #include <wtf/text/StringImpl.h>
 
@@ -96,7 +95,21 @@ public:
     
     TriState strictEqual(const LazyJSValue& other) const;
     
-    uintptr_t switchLookupValue(SwitchKind) const;
+    unsigned switchLookupValue() const
+    {
+        // NB. Not every kind of JSValue will be able to give you a switch lookup
+        // value, and this method will assert, or do bad things, if you use it
+        // for a kind of value that can't.
+        switch (m_kind) {
+        case KnownValue:
+            return value()->value().asInt32();
+        case SingleCharacterString:
+            return character();
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            return 0;
+        }
+    }
     
     void dump(PrintStream&) const;
     void dumpInContext(PrintStream&, DumpContext*) const;
index e6bb969..c277291 100644 (file)
@@ -113,9 +113,6 @@ void printInternal(PrintStream& out, SwitchKind kind)
     case SwitchString:
         out.print("SwitchString");
         return;
-    case SwitchCell:
-        out.print("SwitchCell");
-        return;
     }
     RELEASE_ASSERT_NOT_REACHED();
 }
index e4b2bef..1423394 100644 (file)
@@ -157,6 +157,12 @@ struct SwitchCase {
     BranchTarget target;
 };
 
+enum SwitchKind {
+    SwitchImm,
+    SwitchChar,
+    SwitchString
+};
+
 struct SwitchData {
     // Initializes most fields to obviously invalid values. Anyone
     // constructing this should make sure to initialize everything they
@@ -179,7 +185,6 @@ struct SwitchData {
 // distinguishes an immediate value (typically an index into a CodeBlock data structure - 
 // a constant index, argument, or identifier) from a Node*.
 struct OpInfo {
-    OpInfo() : m_value(0) { }
     explicit OpInfo(int32_t value) : m_value(static_cast<uintptr_t>(value)) { }
     explicit OpInfo(uint32_t value) : m_value(static_cast<uintptr_t>(value)) { }
 #if OS(DARWIN) || USE(JSVALUE64)
@@ -1004,8 +1009,6 @@ struct Node {
         case GetMyArgumentByValSafe:
         case Call:
         case Construct:
-        case ProfiledCall:
-        case ProfiledConstruct:
         case NativeCall:
         case NativeConstruct:
         case GetByOffset:
@@ -1041,11 +1044,9 @@ struct Node {
         m_opInfo2 = prediction;
     }
     
-    bool hasCellOperand()
+    bool canBeKnownFunction()
     {
         switch (op()) {
-        case AllocationProfileWatchpoint:
-        case CheckCell:
         case NativeConstruct:
         case NativeCall:
             return true;
@@ -1054,16 +1055,54 @@ struct Node {
         }
     }
 
-    FrozenValue* cellOperand()
+    bool hasKnownFunction()
+    {
+        switch (op()) {
+        case NativeConstruct:
+        case NativeCall:
+            return (bool)m_opInfo;
+        default:
+            return false;
+        }
+    }
+    
+    JSFunction* knownFunction()
+    {
+        ASSERT(canBeKnownFunction());
+        return bitwise_cast<JSFunction*>(m_opInfo);
+    }
+
+    void giveKnownFunction(JSFunction* callData) 
     {
-        ASSERT(hasCellOperand());
+        ASSERT(canBeKnownFunction());
+        m_opInfo = bitwise_cast<uintptr_t>(callData);
+    }
+
+    bool hasFunction()
+    {
+        switch (op()) {
+        case CheckFunction:
+        case AllocationProfileWatchpoint:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    FrozenValue* function()
+    {
+        ASSERT(hasFunction());
         return reinterpret_cast<FrozenValue*>(m_opInfo);
     }
     
-    void setCellOperand(FrozenValue* value)
+    bool hasExecutable()
     {
-        ASSERT(hasCellOperand());
-        m_opInfo = bitwise_cast<uintptr_t>(value);
+        return op() == CheckExecutable;
+    }
+    
+    ExecutableBase* executable()
+    {
+        return jsCast<ExecutableBase*>(reinterpret_cast<JSCell*>(m_opInfo));
     }
     
     bool hasVariableWatchpointSet()
index 764a6b0..fede8b5 100644 (file)
@@ -153,7 +153,7 @@ namespace JSC { namespace DFG {
     macro(PutByIdFlush, NodeMustGenerate | NodeMustGenerate | NodeClobbersWorld) \
     macro(PutByIdDirect, NodeMustGenerate | NodeClobbersWorld) \
     macro(CheckStructure, NodeMustGenerate) \
-    macro(GetExecutable, NodeResultJS) \
+    macro(CheckExecutable, NodeMustGenerate) \
     macro(PutStructure, NodeMustGenerate) \
     macro(AllocatePropertyStorage, NodeMustGenerate | NodeResultStorage) \
     macro(ReallocatePropertyStorage, NodeMustGenerate | NodeResultStorage) \
@@ -185,8 +185,7 @@ namespace JSC { namespace DFG {
     macro(VariableWatchpoint, NodeMustGenerate) \
     macro(VarInjectionWatchpoint, NodeMustGenerate) \
     macro(FunctionReentryWatchpoint, NodeMustGenerate) \
-    macro(CheckCell, NodeMustGenerate) \
-    macro(CheckBadCell, NodeMustGenerate) \
+    macro(CheckFunction, NodeMustGenerate) \
     macro(AllocationProfileWatchpoint, NodeMustGenerate) \
     macro(CheckInBounds, NodeMustGenerate) \
     \
@@ -215,8 +214,6 @@ namespace JSC { namespace DFG {
     /* Calls. */\
     macro(Call, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
     macro(Construct, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
-    macro(ProfiledCall, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
-    macro(ProfiledConstruct, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
     macro(NativeCall, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
     macro(NativeConstruct, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
     \
@@ -289,11 +286,6 @@ namespace JSC { namespace DFG {
     /* different compiler. */\
     macro(ForceOSRExit, NodeMustGenerate) \
     \
-    /* Vends a bottom JS value. It is invalid to ever execute this. Useful for cases */\
-    /* where we know that we would have exited but we'd like to still track the control */\
-    /* flow. */\
-    macro(BottomValue, NodeResultJS) \
-    \
     /* Checks the watchdog timer. If the timer has fired, we OSR exit to the */ \
     /* baseline JIT to redo the watchdog timer check, and service the timer. */ \
     macro(CheckWatchdogTimer, NodeMustGenerate) \
index e5d9c10..5850582 100644 (file)
@@ -92,10 +92,8 @@ public:
                         node->children.removeEdge(i--);
                     }
                     
-                    if (node->children.isEmpty()) {
-                        m_graph.m_allocator.free(node);
+                    if (node->children.isEmpty())
                         continue;
-                    }
                     
                     node->convertToCheck();
                 }
index a2c6df4..73d62e8 100644 (file)
@@ -125,7 +125,6 @@ public:
                     }
                     
                     if (node->children.isEmpty()) {
-                        m_graph.m_allocator.free(node);
                         changed = true;
                         continue;
                     }
@@ -143,7 +142,6 @@ public:
                         changed = true;
                     }
                     if (node->children.isEmpty()) {
-                        m_graph.m_allocator.free(node);
                         changed = true;
                         continue;
                     }
@@ -151,10 +149,8 @@ public:
                 }
                     
                 case HardPhantom: {
-                    if (node->children.isEmpty()) {
-                        m_graph.m_allocator.free(node);
+                    if (node->children.isEmpty())
                         continue;
-                    }
                     break;
                 }
                     
index f9c6dcd..19e1733 100644 (file)
@@ -188,8 +188,6 @@ private:
         case GetDirectPname:
         case Call:
         case Construct:
-        case ProfiledCall:
-        case ProfiledConstruct:
         case NativeCall:
         case NativeConstruct:
         case GetGlobalVar:
@@ -198,8 +196,7 @@ private:
             break;
         }
             
-        case GetGetterSetterByOffset:
-        case GetExecutable: {
+        case GetGetterSetterByOffset: {
             changed |= setPrediction(SpecCellOther);
             break;
         }
@@ -645,8 +642,8 @@ private:
         case ForceOSRExit:
         case SetArgument:
         case CheckStructure:
-        case CheckCell:
-        case CheckBadCell:
+        case CheckExecutable:
+        case CheckFunction:
         case PutStructure:
         case TearOffActivation:
         case TearOffArguments:
@@ -668,10 +665,6 @@ private:
         case ZombieHint:
             break;
             
-        // This gets ignored because it only pretends to produce a value.
-        case BottomValue:
-            break;
-            
         // This gets ignored because it already has a prediction.
         case ExtractOSREntryLocal:
             break;
index de97c89..c9f0e1d 100644 (file)
@@ -159,7 +159,7 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node)
     case PutByIdFlush:
     case PutByIdDirect:
     case CheckStructure:
-    case GetExecutable:
+    case CheckExecutable:
     case GetButterfly:
     case CheckArray:
     case Arrayify:
@@ -174,8 +174,7 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node)
     case PutGlobalVar:
     case VariableWatchpoint:
     case VarInjectionWatchpoint:
-    case CheckCell:
-    case CheckBadCell:
+    case CheckFunction:
     case AllocationProfileWatchpoint:
     case RegExpExec:
     case RegExpTest:
@@ -188,8 +187,6 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node)
     case CompareStrictEq:
     case Call:
     case Construct:
-    case ProfiledCall:
-    case ProfiledConstruct:
     case NewObject:
     case NewArray:
     case NewArrayWithSize:
@@ -276,11 +273,6 @@ bool safeToExecute(AbstractStateType& state, Graph& graph, Node* node)
     case NativeConstruct:
         return false; // TODO: add a check for already checked.  https://bugs.webkit.org/show_bug.cgi?id=133769
 
-    case BottomValue:
-        // If in doubt, assume that this isn't safe to execute, just because we have no way of
-        // compiling this node.
-        return false;
-
     case GetByVal:
     case GetIndexedPropertyStorage:
     case GetArrayLength:
index 0729ce8..01b313b 100644 (file)
@@ -5354,10 +5354,6 @@ void SpeculativeJIT::emitSwitch(Node* node)
     case SwitchString: {
         emitSwitchString(node, data);
         return;
-    }
-    case SwitchCell: {
-        DFG_CRASH(m_jit.graph(), node, "Bad switch kind");
-        return;
     } }
     RELEASE_ASSERT_NOT_REACHED();
 }
index 9a52f45..36f290e 100644 (file)
@@ -640,9 +640,9 @@ void SpeculativeJIT::compileMiscStrictEq(Node* node)
 
 void SpeculativeJIT::emitCall(Node* node)
 {
-    bool isCall = node->op() == Call || node->op() == ProfiledCall;
+    bool isCall = node->op() == Call;
     if (!isCall)
-        ASSERT(node->op() == Construct || node->op() == ProfiledConstruct);
+        ASSERT(node->op() == Construct);
 
     // For constructors, the this argument is not passed but we have to make space
     // for it.
@@ -689,13 +689,6 @@ void SpeculativeJIT::emitCall(Node* node)
 
     m_jit.emitStoreCodeOrigin(node->origin.semantic);
     
-    CallLinkInfo* info = m_jit.codeBlock()->addCallLinkInfo();
-
-    if (node->op() == ProfiledCall || node->op() == ProfiledConstruct) {
-        m_jit.vm()->callEdgeLog->emitLogCode(
-            m_jit, info->callEdgeProfile, callee.jsValueRegs());
-    }
-    
     slowPath.append(branchNotCell(callee.jsValueRegs()));
     slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck));
     m_jit.loadPtr(MacroAssembler::Address(calleePayloadGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultPayloadGPR);
@@ -720,6 +713,7 @@ void SpeculativeJIT::emitCall(Node* node)
         m_jit.move(calleePayloadGPR, GPRInfo::regT0);
         m_jit.move(calleeTagGPR, GPRInfo::regT1);
     }
+    CallLinkInfo* info = m_jit.codeBlock()->addCallLinkInfo();
     m_jit.move(MacroAssembler::TrustedImmPtr(info), GPRInfo::regT2);
     JITCompiler::Call slowCall = m_jit.nearCall();
 
@@ -3681,21 +3675,18 @@ void SpeculativeJIT::compile(Node* node)
         compileGetArrayLength(node);
         break;
         
-    case CheckCell: {
-        SpeculateCellOperand cell(this, node->child1());
-        speculationCheck(BadCell, JSValueSource::unboxedCell(cell.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, cell.gpr(), node->cellOperand()->value().asCell()));
+    case CheckFunction: {
+        SpeculateCellOperand function(this, node->child1());
+        speculationCheck(BadFunction, JSValueSource::unboxedCell(function.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, function.gpr(), node->function()->value().asCell()));
         noResult(node);
         break;
     }
 
-    case GetExecutable: {
+    case CheckExecutable: {
         SpeculateCellOperand function(this, node->child1());
-        GPRTemporary result(this, Reuse, function);
-        GPRReg functionGPR = function.gpr();
-        GPRReg resultGPR = result.gpr();
-        speculateCellType(node->child1(), functionGPR, SpecFunction, JSFunctionType);
-        m_jit.loadPtr(JITCompiler::Address(functionGPR, JSFunction::offsetOfExecutable()), resultGPR);
-        cellResult(resultGPR, node);
+        speculateCellType(node->child1(), function.gpr(), SpecFunction, JSFunctionType);
+        speculationCheck(BadExecutable, JSValueSource::unboxedCell(function.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, JITCompiler::Address(function.gpr(), JSFunction::offsetOfExecutable()), node->executable()));
+        noResult(node);
         break;
     }
         
@@ -4165,8 +4156,6 @@ void SpeculativeJIT::compile(Node* node)
 
     case Call:
     case Construct:
-    case ProfiledCall:
-    case ProfiledConstruct:
         emitCall(node);
         break;
 
@@ -4913,8 +4902,6 @@ void SpeculativeJIT::compile(Node* node)
     case MultiPutByOffset:
     case NativeCall:
     case NativeConstruct:
-    case CheckBadCell:
-    case BottomValue:
         RELEASE_ASSERT_NOT_REACHED();
         break;
     }
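
The emitCall paths in this and the following SpeculativeJIT file keep the usual shape of a monomorphic call inline cache: compare the incoming callee against a patchable expected value, take the fast near call on a match, and otherwise fall into a link stub that records the callee for next time. A conceptual sketch of that shape, with invented names and ordinary data where the JIT patches instructions:

    // Invented names; an ordinary comparison and data member stand in for the
    // instructions the JIT patches (targetToCheck) and the CallLinkInfo link stub.
    #include <cstdio>

    using Callee = int (*)(int);

    struct CallSiteCache {
        Callee expected { nullptr }; // the callee this site is currently linked to
    };

    static int square(int x) { return x * x; }
    static int negate(int x) { return -x; }

    // Slow path: binds the site to the incoming callee and completes the call.
    static int linkAndCall(CallSiteCache& site, Callee callee, int argument)
    {
        site.expected = callee;
        return callee(argument);
    }

    static int cachedCall(CallSiteCache& site, Callee callee, int argument)
    {
        if (callee == site.expected)
            return callee(argument); // fast path: callee matches the linked one
        return linkAndCall(site, callee, argument);
    }

    int main()
    {
        CallSiteCache site;
        std::printf("%d\n", cachedCall(site, square, 5)); // slow path, links square
        std::printf("%d\n", cachedCall(site, square, 6)); // fast path
        std::printf("%d\n", cachedCall(site, negate, 7)); // relinks to negate
        return 0;
    }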
index ea8f031..7158385 100644 (file)
@@ -626,9 +626,10 @@ void SpeculativeJIT::compileMiscStrictEq(Node* node)
 
 void SpeculativeJIT::emitCall(Node* node)
 {
-    bool isCall = node->op() == Call || node->op() == ProfiledCall;
+
+    bool isCall = node->op() == Call;
     if (!isCall)
-        DFG_ASSERT(m_jit.graph(), node, node->op() == Construct || node->op() == ProfiledConstruct);
+        DFG_ASSERT(m_jit.graph(), node, node->op() == Construct);
     
     // For constructors, the this argument is not passed but we have to make space
     // for it.
@@ -669,13 +670,6 @@ void SpeculativeJIT::emitCall(Node* node)
 
     m_jit.emitStoreCodeOrigin(node->origin.semantic);
     
-    CallLinkInfo* callLinkInfo = m_jit.codeBlock()->addCallLinkInfo();
-    
-    if (node->op() == ProfiledCall || node->op() == ProfiledConstruct) {
-        m_jit.vm()->callEdgeLog->emitLogCode(
-            m_jit, callLinkInfo->callEdgeProfile, JSValueRegs(calleeGPR));
-    }
-
     slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0));
 
     m_jit.loadPtr(MacroAssembler::Address(calleeGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultGPR);
@@ -688,6 +682,7 @@ void SpeculativeJIT::emitCall(Node* node)
     slowPath.link(&m_jit);
     
     m_jit.move(calleeGPR, GPRInfo::regT0); // Callee needs to be in regT0
+    CallLinkInfo* callLinkInfo = m_jit.codeBlock()->addCallLinkInfo();
     m_jit.move(MacroAssembler::TrustedImmPtr(callLinkInfo), GPRInfo::regT2); // Link info needs to be in regT2
     JITCompiler::Call slowCall = m_jit.nearCall();
     
@@ -3773,21 +3768,18 @@ void SpeculativeJIT::compile(Node* node)
         compileGetArrayLength(node);
         break;
         
-    case CheckCell: {
-        SpeculateCellOperand cell(this, node->child1());
-        speculationCheck(BadCell, JSValueSource::unboxedCell(cell.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, cell.gpr(), node->cellOperand()->value().asCell()));
+    case CheckFunction: {
+        SpeculateCellOperand function(this, node->child1());
+        speculationCheck(BadFunction, JSValueSource::unboxedCell(function.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, function.gpr(), node->function()->value().asCell()));
         noResult(node);
         break;
     }
         
-    case GetExecutable: {
+    case CheckExecutable: {
         SpeculateCellOperand function(this, node->child1());
-        GPRTemporary result(this, Reuse, function);
-        GPRReg functionGPR = function.gpr();
-        GPRReg resultGPR = result.gpr();
-        speculateCellType(node->child1(), functionGPR, SpecFunction, JSFunctionType);
-        m_jit.loadPtr(JITCompiler::Address(functionGPR, JSFunction::offsetOfExecutable()), resultGPR);
-        cellResult(resultGPR, node);
+        speculateCellType(node->child1(), function.gpr(), SpecFunction, JSFunctionType);
+        speculationCheck(BadExecutable, JSValueSource::unboxedCell(function.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, JITCompiler::Address(function.gpr(), JSFunction::offsetOfExecutable()), node->executable()));
+        noResult(node);
         break;
     }
         
@@ -4227,11 +4219,9 @@ void SpeculativeJIT::compile(Node* node)
 
     case Call:
     case Construct:
-    case ProfiledCall:
-    case ProfiledConstruct:
         emitCall(node);
         break;
-        
+
     case CreateActivation: {
         DFG_ASSERT(m_jit.graph(), node, !node->origin.semantic.inlineCallFrame);
         
@@ -4980,9 +4970,7 @@ void SpeculativeJIT::compile(Node* node)
     case MultiGetByOffset:
     case MultiPutByOffset:
     case FiatInt52:
-    case CheckBadCell:
-    case BottomValue:
-        DFG_CRASH(m_jit.graph(), node, "Unexpected node");
+        DFG_CRASH(m_jit.graph(), node, "Unexpected FTL node");
         break;
     }
 
index 0f2e146..408ee4c 100644 (file)
@@ -62,6 +62,10 @@ public:
                 Node* node = block->at(nodeIndex);
             
                 switch (node->op()) {
+                case CheckExecutable:
+                    registerStructure(node->executable()->structure());
+                    break;
+                
                 case CheckStructure:
                     registerStructures(node->structureSet());
                     break;
index d1b6e7a..09c22b8 100644 (file)
@@ -50,17 +50,13 @@ public:
         if (!Options::useFTLJIT())
             return false;
         
-        if (m_graph.m_profiledBlock->m_didFailFTLCompilation) {
-            removeFTLProfiling();
+        if (m_graph.m_profiledBlock->m_didFailFTLCompilation)
             return false;
-        }
         
 #if ENABLE(FTL_JIT)
         FTL::CapabilityLevel level = FTL::canCompile(m_graph);
-        if (level == FTL::CannotCompile) {
-            removeFTLProfiling();
+        if (level == FTL::CannotCompile)
             return false;
-        }
         
         if (!Options::enableOSREntryToFTL())
             level = FTL::CanCompile;
@@ -122,32 +118,6 @@ public:
         return false;
 #endif // ENABLE(FTL_JIT)
     }
-
-private:
-    void removeFTLProfiling()
-    {
-        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
-            BasicBlock* block = m_graph.block(blockIndex);
-            if (!block)
-                continue;
-            
-            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
-                Node* node = block->at(nodeIndex);
-                switch (node->op()) {
-                case ProfiledCall:
-                    node->setOp(Call);
-                    break;
-                    
-                case ProfiledConstruct:
-                    node->setOp(Construct);
-                    break;
-                    
-                default:
-                    break;
-                }
-            }
-        }
-    }
 };
 
 bool performTierUpCheckInjection(Graph& graph)
index 8e90f69..35d1ebb 100644 (file)
@@ -200,8 +200,7 @@ public:
                 
                 VALIDATE((node), !mayExit(m_graph, node) || node->origin.forExit.isSet());
                 VALIDATE((node), !node->hasStructure() || !!node->structure());
-                VALIDATE((node), !node->hasCellOperand() || node->cellOperand()->value().isCell());
-                VALIDATE((node), !node->hasCellOperand() || !!node->cellOperand()->value());
+                VALIDATE((node), !node->hasFunction() || node->function()->value().isFunction());
                  
                 if (!(node->flags() & NodeHasVarArgs)) {
                     if (!node->child2())
index 39aea40..0c48489 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -114,7 +114,7 @@ private:
             break;
             
         case AllocationProfileWatchpoint:
-            addLazily(jsCast<JSFunction*>(m_node->cellOperand()->value())->allocationProfileWatchpointSet());
+            addLazily(jsCast<JSFunction*>(m_node->function()->value())->allocationProfileWatchpointSet());
             break;
             
         case VariableWatchpoint:
index 33a3ab5..ee11907 100644 (file)
@@ -104,8 +104,7 @@ inline CapabilityLevel canCompile(Node* node)
     case PutClosureVar:
     case InvalidationPoint:
     case StringCharAt:
-    case CheckCell:
-    case CheckBadCell:
+    case CheckFunction:
     case StringCharCodeAt:
     case AllocatePropertyStorage:
     case ReallocatePropertyStorage:
@@ -127,7 +126,7 @@ inline CapabilityLevel canCompile(Node* node)
     case ConstantStoragePointer:
     case Check:
     case CountExecution:
-    case GetExecutable:
+    case CheckExecutable:
     case GetScope:
     case AllocationProfileWatchpoint:
     case CheckArgumentsNotCreated:
@@ -167,14 +166,8 @@ inline CapabilityLevel canCompile(Node* node)
     case GetGenericPropertyEnumerator:
     case GetEnumeratorPname:
     case ToIndexString:
-    case BottomValue:
         // These are OK.
         break;
-    case ProfiledCall:
-    case ProfiledConstruct:
-        // These are OK not because the FTL can support them, but because if the DFG sees one of
-        // these then the FTL will see a normal Call/Construct.
-        break;
     case Identity:
         // No backend handles this because it will be optimized out. But we may check
         // for capabilities before optimization. It would be a deep error to remove this
@@ -333,7 +326,6 @@ inline CapabilityLevel canCompile(Node* node)
         switch (node->switchData()->kind) {
         case SwitchImm:
         case SwitchChar:
-        case SwitchCell:
             break;
         default:
             return CannotCompile;
index 0081fce..eb0d3df 100644 (file)
@@ -67,6 +67,7 @@ NO_RETURN_DUE_TO_CRASH static void ftlUnreachable()
 NO_RETURN_DUE_TO_CRASH static void ftlUnreachable(
     CodeBlock* codeBlock, BlockIndex blockIndex, unsigned nodeIndex)
 {
+    
     dataLog("Crashing in thought-to-be-unreachable FTL-generated code for ", pointerDump(codeBlock), " at basic block #", blockIndex);
     if (nodeIndex != UINT_MAX)
         dataLog(", node @", nodeIndex);
@@ -152,17 +153,11 @@ public:
         for (unsigned blockIndex = depthFirst.size(); blockIndex--; ) {
             BasicBlock* block = depthFirst[blockIndex];
             for (unsigned nodeIndex = block->size(); nodeIndex--; ) {
-                Node* node = block->at(nodeIndex);
-                switch (node->op()) {
-                case NativeCall:
-                case NativeConstruct: {
+                Node* m_node = block->at(nodeIndex);
+                if (m_node->hasKnownFunction()) {
                     int numArgs = m_node->numChildren();
                     if (numArgs > maxNumberOfArguments)
                         maxNumberOfArguments = numArgs;
-                    break;
-                }
-                default:
-                    break;
                 }
             }
         }
@@ -473,14 +468,11 @@ private:
         case CheckStructure:
             compileCheckStructure();
             break;
-        case CheckCell:
-            compileCheckCell();
-            break;
-        case CheckBadCell:
-            compileCheckBadCell();
+        case CheckFunction:
+            compileCheckFunction();
             break;
-        case GetExecutable:
-            compileGetExecutable();
+        case CheckExecutable:
+            compileCheckExecutable();
             break;
         case ArrayifyToStructure:
             compileArrayifyToStructure();
@@ -1751,25 +1743,26 @@ private:
         m_out.appendTo(continuation, lastNext);
     }
     
-    void compileCheckCell()
+    void compileCheckFunction()
     {
         LValue cell = lowCell(m_node->child1());
         
         speculate(
-            BadCell, jsValueValue(cell), m_node->child1().node(),
-            m_out.notEqual(cell, weakPointer(m_node->cellOperand()->value().asCell())));
+            BadFunction, jsValueValue(cell), m_node->child1().node(),
+            m_out.notEqual(cell, weakPointer(m_node->function()->value().asCell())));
     }
     
-    void compileCheckBadCell()
-    {
-        terminate(BadCell);
-    }
-    
-    void compileGetExecutable()
+    void compileCheckExecutable()
     {
         LValue cell = lowCell(m_node->child1());
+        
         speculateFunction(m_node->child1(), cell);
-        setJSValue(m_out.loadPtr(cell, m_heaps.JSFunction_executable));
+        
+        speculate(
+            BadExecutable, jsValueValue(cell), m_node->child1().node(),
+            m_out.notEqual(
+                m_out.loadPtr(cell, m_heaps.JSFunction_executable),
+                weakPointer(m_node->executable())));
     }
     
     void compileArrayifyToStructure()
@@ -3680,7 +3673,9 @@ private:
         int numPassedArgs = m_node->numChildren() - 1;
         int numArgs = numPassedArgs + dummyThisArgument;
 
-        JSFunction* knownFunction = jsCast<JSFunction*>(m_node->cellOperand()->value().asCell());
+        ASSERT(m_node->hasKnownFunction());
+
+        JSFunction* knownFunction = m_node->knownFunction();
         NativeFunction function = knownFunction->nativeFunction();
 
         Dl_info info;
@@ -3923,37 +3918,10 @@ private:
             return;
         }
         
-        case SwitchString: {
+        case SwitchString:
             DFG_CRASH(m_graph, m_node, "Unimplemented");
-            return;
+            break;
         }
-            
-        case SwitchCell: {
-            LValue cell;
-            switch (m_node->child1().useKind()) {
-            case CellUse: {
-                cell = lowCell(m_node->child1());
-                break;
-            }
-                
-            case UntypedUse: {
-                LValue value = lowJSValue(m_node->child1());
-                LBasicBlock cellCase = FTL_NEW_BLOCK(m_out, ("Switch/SwitchCell cell case"));
-                m_out.branch(
-                    isCell(value), unsure(cellCase), unsure(lowBlock(data->fallThrough.block)));
-                m_out.appendTo(cellCase);
-                cell = value;
-                break;
-            }
-                
-            default:
-                DFG_CRASH(m_graph, m_node, "Bad use kind");
-                return;
-            }
-            
-            buildSwitch(m_node->switchData(), m_out.intPtr, cell);
-            return;
-        } }
         
         DFG_CRASH(m_graph, m_node, "Bad switch kind");
     }
@@ -5218,7 +5186,7 @@ private:
         Vector<SwitchCase> cases;
         for (unsigned i = 0; i < data->cases.size(); ++i) {
             cases.append(SwitchCase(
-                constInt(type, data->cases[i].value.switchLookupValue(data->kind)),
+                constInt(type, data->cases[i].value.switchLookupValue()),
                 lowBlock(data->cases[i].target.block), Weight(data->cases[i].target.count)));
         }
         
index f795c73..b35f39f 100644 (file)
@@ -984,11 +984,6 @@ void Heap::collect(HeapOperation collectionType)
         vm()->typeProfilerLog()->processLogEntries(ASCIILiteral("GC"));
     }
     
-    if (vm()->callEdgeLog) {
-        DeferGCForAWhile awhile(*this);
-        vm()->callEdgeLog->processLog();
-    }
-    
     RELEASE_ASSERT(!m_deferralDepth);
     ASSERT(vm()->currentThreadIsHoldingAPILock());
     RELEASE_ASSERT(vm()->atomicStringTable() == wtfThreadData().atomicStringTable());
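
The removed lines above process the call edge log under an RAII guard (DeferGCForAWhile) that keeps collection deferred for the duration. A generic scope-guard sketch of that deferral idea; the names are invented, and JSC's guard has additional rules about when collection actually resumes:

    // Invented names; this only shows the scope-guard skeleton.
    #include <cstdio>

    class CollectorStandIn {
    public:
        bool isDeferred() const { return m_deferralDepth > 0; }

        void requestCollection()
        {
            // A real collector would queue the request; here we just report it.
            std::puts(isDeferred() ? "collection deferred" : "collecting now");
        }

        class DeferForScope {
        public:
            explicit DeferForScope(CollectorStandIn& collector)
                : m_collector(collector)
            {
                ++m_collector.m_deferralDepth;
            }
            ~DeferForScope() { --m_collector.m_deferralDepth; }
        private:
            CollectorStandIn& m_collector;
        };

    private:
        unsigned m_deferralDepth { 0 };
    };

    int main()
    {
        CollectorStandIn heap;
        {
            CollectorStandIn::DeferForScope defer(heap); // analogue of DeferGCForAWhile
            heap.requestCollection();                    // prints "collection deferred"
        }
        heap.requestCollection();                        // prints "collecting now"
        return 0;
    }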
index 8f8661b..9aa39f2 100644 (file)
@@ -88,31 +88,6 @@ public:
 #endif
     }
     
-    void storeValue(JSValueRegs regs, void* address)
-    {
-#if USE(JSVALUE64)
-        store64(regs.gpr(), address);
-#else
-        store32(regs.payloadGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + PayloadOffset));
-        store32(regs.tagGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + TagOffset));
-#endif
-    }
-    
-    void loadValue(Address address, JSValueRegs regs)
-    {
-#if USE(JSVALUE64)
-        load64(address, regs.gpr());
-#else
-        if (address.base == regs.payloadGPR()) {
-            load32(address.withOffset(TagOffset), regs.tagGPR());
-            load32(address.withOffset(PayloadOffset), regs.payloadGPR());
-        } else {
-            load32(address.withOffset(PayloadOffset), regs.payloadGPR());
-            load32(address.withOffset(TagOffset), regs.tagGPR());
-        }
-#endif
-    }
-    
     void moveTrustedValue(JSValue value, JSValueRegs regs)
     {
 #if USE(JSVALUE64)
index c58db6c..2d3ae89 100644 (file)
@@ -1666,15 +1666,6 @@ public:
         move(arg4, GPRInfo::argumentGPR3);
     }
 #endif
-    
-    void setupArguments(JSValueRegs arg1)
-    {
-#if USE(JSVALUE64)
-        setupArguments(arg1.gpr());
-#else
-        setupArguments(arg1.payloadGPR(), arg1.tagGPR());
-#endif
-    }
 
     void setupResults(GPRReg destA, GPRReg destB)
     {
index 4b770a6..92a18e2 100644 (file)
@@ -60,8 +60,6 @@ public:
     GPRReg tagGPR() const { return InvalidGPRReg; }
     GPRReg payloadGPR() const { return m_gpr; }
     
-    bool uses(GPRReg gpr) const { return m_gpr == gpr; }
-    
 private:
     GPRReg m_gpr;
 };
@@ -171,8 +169,6 @@ public:
         return tagGPR();
     }
 
-    bool uses(GPRReg gpr) const { return m_tagGPR == gpr || m_payloadGPR == gpr; }
-    
 private:
     int8_t m_tagGPR;
     int8_t m_payloadGPR;
index 6b3bc9a..140e16c 100644 (file)
@@ -212,12 +212,6 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
     emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
 
     store64(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));
-    
-    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
-
-    if (CallEdgeLog::isEnabled() && shouldEmitProfiling()
-        && Options::baselineDoesCallEdgeProfiling())
-        m_vm->ensureCallEdgeLog().emitLogCode(*this, info->callEdgeProfile, JSValueRegs(regT0));
 
     if (opcodeID == op_call_eval) {
         compileCallEval(instruction);
@@ -229,6 +223,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
     addSlowCase(slowCase);
 
     ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
+    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
     info->callType = CallLinkInfo::callTypeFor(opcodeID);
     info->codeOrigin = CodeOrigin(m_bytecodeOffset);
     info->calleeGPR = regT0;
index f6760a9..f67162d 100644 (file)
@@ -300,14 +300,6 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
     store32(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
     store32(regT1, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + TagOffset - sizeof(CallerFrameAndPC)));
 
-    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
-
-    if (CallEdgeLog::isEnabled() && shouldEmitProfiling()
-        && Options::baselineDoesCallEdgeProfiling()) {
-        m_vm->ensureCallEdgeLog().emitLogCode(
-            *this, info->callEdgeProfile, JSValueRegs(regT1, regT0));
-    }
-
     if (opcodeID == op_call_eval) {
         compileCallEval(instruction);
         return;
@@ -321,6 +313,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
     addSlowCase(slowCase);
 
     ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
+    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
     info->callType = CallLinkInfo::callTypeFor(opcodeID);
     info->codeOrigin = CodeOrigin(m_bytecodeOffset);
     info->calleeGPR = regT0;
index ac7a2f4..dd6f170 100644 (file)
@@ -167,13 +167,6 @@ typedef const char* optionString;
     v(bool, enableAccessInlining, true) \
     v(bool, enablePolyvariantDevirtualization, true) \
     v(bool, enablePolymorphicAccessInlining, true) \
-    v(bool, enablePolymorphicCallInlining, true) \
-    v(bool, callStatusShouldUseCallEdgeProfile, true) \
-    v(bool, callEdgeProfileReallyProcessesLog, true) \
-    v(bool, baselineDoesCallEdgeProfiling, false) \
-    v(bool, dfgDoesCallEdgeProfiling, true) \
-    v(bool, enableCallEdgeProfiling, true) \
-    v(unsigned, frequentCallThreshold, 2) \
     v(bool, optimizeNativeCalls, false) \
     \
     v(bool, enableConcurrentJIT, true) \
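
The options deleted above live in an X-macro table: a single v(type, name, default) list that other macros expand into declarations, default values, and dump code. A freestanding sketch of that pattern, with made-up option names:

    // Made-up option names; only the X-macro structure mirrors the table above.
    #include <cstdio>

    #define FOR_EACH_OPTION(v) \
        v(bool, enableThing, true) \
        v(unsigned, thingThreshold, 2)

    struct OptionsSketch {
    #define DECLARE_OPTION(type, name, defaultValue) type name { defaultValue };
        FOR_EACH_OPTION(DECLARE_OPTION)
    #undef DECLARE_OPTION

        void dump() const
        {
            // Each entry is printed as a double for simplicity.
    #define DUMP_OPTION(type, name, defaultValue) \
            std::printf(#name " = %g\n", static_cast<double>(name));
            FOR_EACH_OPTION(DUMP_OPTION)
    #undef DUMP_OPTION
        }
    };

    int main()
    {
        OptionsSketch options;
        options.thingThreshold = 5;
        options.dump(); // prints "enableThing = 1" and "thingThreshold = 5"
        return 0;
    }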
index 55e7813..666792a 100644 (file)
@@ -373,13 +373,6 @@ VM*& VM::sharedInstanceInternal()
     return sharedInstance;
 }
 
-CallEdgeLog& VM::ensureCallEdgeLog()
-{
-    if (!callEdgeLog)
-        callEdgeLog = std::make_unique<CallEdgeLog>();
-    return *callEdgeLog;
-}
-
 #if ENABLE(JIT)
 static ThunkGenerator thunkGeneratorForIntrinsic(Intrinsic intrinsic)
 {
index fa8cb3e..552c311 100644 (file)
@@ -72,7 +72,6 @@ namespace JSC {
 
     class ArityCheckFailReturnThunks;
     class BuiltinExecutables;
-    class CallEdgeLog;
     class CodeBlock;
     class CodeCache;
     class CommonIdentifiers;
@@ -234,9 +233,6 @@ namespace JSC {
 #if ENABLE(DFG_JIT)
         OwnPtr<DFG::LongLivedState> dfgState;
 #endif // ENABLE(DFG_JIT)
-        
-        std::unique_ptr<CallEdgeLog> callEdgeLog;
-        CallEdgeLog& ensureCallEdgeLog();
 
         VMType vmType;
         ClientData* clientData;
diff --git a/Source/JavaScriptCore/tests/stress/new-array-then-exit.js b/Source/JavaScriptCore/tests/stress/new-array-then-exit.js
deleted file mode 100644 (file)
index 7c8a690..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-function foo(f) {
-    return new f();
-}
-
-noInline(foo);
-
-for (var i = 0; i < 10000; ++i)
-    foo(Array);
-
-var didCall = false;
-foo(function() { didCall = true; });
-
-if (!didCall)
-    throw "Error: didn't call my function.";
diff --git a/Source/JavaScriptCore/tests/stress/poly-call-exit-this.js b/Source/JavaScriptCore/tests/stress/poly-call-exit-this.js
deleted file mode 100644 (file)
index 596af3e..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-(function() {
-    function foo(x) { return 1 + this.f; }
-    function bar(x) { return x + this.f; }
-    function baz(x) { return x + 1 + this.f; }
-    
-    var n = 1000000;
-    
-    var result = (function(o) {
-        var f = {fun:foo, f:1};
-        var g = {fun:bar, f:2};
-        var h = {fun:baz, f:3};
-        
-        var result = 0;
-        for (var i = 0; i < n; ++i) {
-            if (i == n - 1)
-                f = h;
-            result += f.fun(o.f);
-            
-            var tmp = f;
-            f = g;
-            g = tmp;
-        }
-        
-        return result;
-    })({f:42});
-    
-    if (result != ((n / 2 - 1) * (42 + 2)) + (n / 2 * (1 + 1) + (42 + 1 + 3)))
-        throw "Error: bad result: " + result;
-})();
diff --git a/Source/JavaScriptCore/tests/stress/poly-call-exit.js b/Source/JavaScriptCore/tests/stress/poly-call-exit.js
deleted file mode 100644 (file)
index eadd16a..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-(function() {
-    function foo(x) { return 1; }
-    function bar(x) { return x; }
-    function baz(x) { return x + 1; }
-    
-    var n = 1000000;
-    
-    var result = (function(o) {
-        var f = foo;
-        var g = bar;
-        var h = baz;
-        
-        var result = 0;
-        for (var i = 0; i < n; ++i) {
-            if (i == n - 1)
-                f = h;
-            result += f(o.f);
-            
-            var tmp = f;
-            f = g;
-            g = tmp;
-        }
-        
-        return result;
-    })({f:42});
-    
-    if (result != ((n / 2 - 1) * 42) + (n / 2 * 1) + (42 + 1))
-        throw "Error: bad result: " + result;
-})();
index fe56dd5..9d551ce 100644 (file)
@@ -1,3 +1,18 @@
+2014-08-26  Commit Queue  <commit-queue@webkit.org>
+
+        Unreviewed, rolling out r172940.
+        https://bugs.webkit.org/show_bug.cgi?id=136256
+
+        Caused assertions on fast/storage/serialized-script-
+        value.html, and possibly flakiness on more tests (Requested by
+        ap on #webkit).
+
+        Reverted changeset:
+
+        "FTL should be able to do polymorphic call inlining"
+        https://bugs.webkit.org/show_bug.cgi?id=135145
+        http://trac.webkit.org/changeset/172940
+
 2014-08-23  Filip Pizlo  <fpizlo@apple.com>
 
         FTL should be able to do polymorphic call inlining
index fa35b71..48d0e68 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  Copyright (C) 2006, 2007, 2008, 2009, 2010, 2014 Apple Inc. All rights reserved.
+ *  Copyright (C) 2006, 2007, 2008, 2009, 2010 Apple Inc. All rights reserved.
  *
  *  This library is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU Library General Public
@@ -22,7 +22,6 @@
 #define WTF_OwnPtr_h
 
 #include <wtf/Assertions.h>
-#include <wtf/Atomics.h>
 #include <wtf/Noncopyable.h>
 #include <wtf/OwnPtrCommon.h>
 #include <algorithm>
@@ -73,17 +72,6 @@ namespace WTF {
         template<typename U> OwnPtr& operator=(OwnPtr<U>&&);
 
         void swap(OwnPtr& o) { std::swap(m_ptr, o.m_ptr); }
-        
-        // Construct an object to store into this OwnPtr, but only so long as this OwnPtr
-        // doesn't already point to an object. This will ensure that after you call this,
-        // the OwnPtr will point to an instance of T, even if called concurrently. This
-        // instance may or may not have been created by this call. Moreover, this call uses
-        // an opportunistic transaction, in that we may create an instance of T and then
-        // immediately throw it away, if in the process of creating that instance some
-        // other thread was doing the same thing and stored its instance into this pointer
-        // before we had a chance to do so.
-        template<typename... Args>
-        void createTransactionally(Args...);
 
     private:
         explicit OwnPtr(PtrType ptr) : m_ptr(ptr) { }
@@ -198,28 +186,6 @@ namespace WTF {
         return p.get();
     }
 
-    template<typename T> template<typename... Args> inline void OwnPtr<T>::createTransactionally(Args... args)
-    {
-        if (m_ptr) {
-            WTF::loadLoadFence();
-            return;
-        }
-        
-        T* newObject = new T(args...);
-        WTF::storeStoreFence();
-#if ENABLE(COMPARE_AND_SWAP)
-        do {
-            if (m_ptr) {
-                delete newObject;
-                WTF::loadLoadFence();
-                return;
-            }
-        } while (!WTF::weakCompareAndSwap(bitwise_cast<void*volatile*>(&m_ptr), nullptr, newObject));
-#else
-        m_ptr = newObject;
-#endif
-    }
-
 } // namespace WTF
 
 using WTF::OwnPtr;
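
The createTransactionally() comment and implementation removed above describe an opportunistic, lock-free lazy-initialization pattern: speculatively construct an instance, publish it with a compare-and-swap, and discard it if another thread published first. A minimal standalone sketch of the same idea, assuming std::atomic rather than WTF's storeStoreFence/loadLoadFence and weakCompareAndSwap, and with illustrative names (Lazy, ensure):

    // Sketch only; not the WTF OwnPtr API.
    #include <atomic>

    template<typename T>
    class Lazy {
    public:
        ~Lazy() { delete m_ptr.load(std::memory_order_acquire); }

        // Ensure this points at a T, even when called concurrently.
        // A losing thread deletes the instance it speculatively created.
        template<typename... Args>
        T& ensure(Args... args)
        {
            T* existing = m_ptr.load(std::memory_order_acquire);
            if (existing)
                return *existing;

            T* newObject = new T(args...);
            T* expected = nullptr;
            if (m_ptr.compare_exchange_strong(expected, newObject,
                    std::memory_order_acq_rel, std::memory_order_acquire))
                return *newObject; // We won the race; our instance is published.

            delete newObject; // Another thread won; discard our speculative instance.
            return *expected; // On failure, compare_exchange_strong loaded the winner here.
        }

    private:
        std::atomic<T*> m_ptr { nullptr };
    };

The trade-off, as the removed comment notes, is that a thread may construct a T and immediately throw it away; in exchange, initialization never blocks and never exposes a partially constructed object.
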
index 0370884..3e6fa4a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 
 namespace WTF {
 
-template<typename T, typename CounterType = unsigned>
+template<typename T>
 class Spectrum {
 public:
-    typedef typename HashMap<T, CounterType>::iterator iterator;
-    typedef typename HashMap<T, CounterType>::const_iterator const_iterator;
+    typedef typename HashMap<T, unsigned long>::iterator iterator;
+    typedef typename HashMap<T, unsigned long>::const_iterator const_iterator;
     
     Spectrum() { }
     
-    void add(const T& key, CounterType count = 1)
+    void add(const T& key, unsigned long count = 1)
     {
-        if (!count)
-            return;
-        typename HashMap<T, CounterType>::AddResult result = m_map.add(key, count);
+        typename HashMap<T, unsigned long>::AddResult result = m_map.add(key, count);
         if (!result.isNewEntry)
             result.iterator->value += count;
     }
     
-    template<typename U>
-    void addAll(const Spectrum<T, U>& otherSpectrum)
-    {
-        for (auto& entry : otherSpectrum)
-            add(entry.key, entry.count);
-    }
-    
-    CounterType get(const T& key) const
+    unsigned long get(const T& key) const
     {
         const_iterator iter = m_map.find(key);
         if (iter == m_map.end())
@@ -64,8 +55,6 @@ public:
         return iter->value;
     }
     
-    size_t size() const { return m_map.size(); }
-    
     iterator begin() { return m_map.begin(); }
     iterator end() { return m_map.end(); }
     const_iterator begin() const { return m_map.begin(); }
@@ -74,7 +63,7 @@ public:
     struct KeyAndCount {
         KeyAndCount() { }
         
-        KeyAndCount(const T& key, CounterType count)
+        KeyAndCount(const T& key, unsigned long count)
             : key(key)
             , count(count)
         {
@@ -91,7 +80,7 @@ public:
         }
 
         T key;
-        CounterType count;
+        unsigned long count;
     };
     
     // Returns a list ordered from lowest-count to highest-count.
@@ -105,18 +94,8 @@ public:
         return list;
     }
     
-    void clear() { m_map.clear(); }
-    
-    template<typename Functor>
-    void removeIf(const Functor& functor)
-    {
-        m_map.removeIf([functor] (typename HashMap<T, CounterType>::KeyValuePairType& pair) {
-                return functor(KeyAndCount(pair.key, pair.value));
-            });
-    }
-    
 private:
-    HashMap<T, CounterType> m_map;
+    HashMap<T, unsigned long> m_map;
 };
 
 } // namespace WTF
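
For context, Spectrum (whose counter type is reverted to unsigned long above) is a small frequency-counting helper: add() bumps a per-key tally, get() reads it back (returning 0 for keys never added), and buildList() returns entries ordered from lowest to highest count, per the comment above. A hedged usage sketch, assuming a WTF-aware translation unit and using only the add()/get() calls visible in this diff; the string keys are illustrative:

    // Sketch only; assumes WTF headers are available.
    #include <wtf/Assertions.h>
    #include <wtf/Spectrum.h>
    #include <wtf/text/WTFString.h>

    static void tallyExample()
    {
        WTF::Spectrum<WTF::String> counts;

        counts.add("BadCache");    // tally one occurrence
        counts.add("BadCache", 3); // tally three more at once
        counts.add("Overflow");

        ASSERT(counts.get("BadCache") == 4);
        ASSERT(counts.get("Overflow") == 1);
        ASSERT(!counts.get("Uncountable")); // get() returns 0 for absent keys
    }
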