DFG should support continuous optimization
author fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 21 Sep 2011 23:36:35 +0000 (23:36 +0000)
committer fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 21 Sep 2011 23:36:35 +0000 (23:36 +0000)
https://bugs.webkit.org/show_bug.cgi?id=68329

Reviewed by Geoffrey Garen.

This adds the ability to reoptimize a code block if speculation
failures happen frequently. 6% speed-up on Kraken, 1% slow-down
on V8, neutral on SunSpider.
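
In essence, each DFG code block now tracks how often speculation succeeds
versus how often it fails, and jettisons itself for recompilation when the
failure rate crosses a threshold that backs off with every retry. A minimal
standalone model of the trigger (illustrative only; the constants mirror
desiredSuccessFailRatio() and largeFailCountThreshold() from the CodeBlock.h
changes below):

    // Standalone sketch of the reoptimization trigger, not JSC code.
    #include <cstdint>

    struct SpeculationCounters {
        uint32_t successes;
        uint32_t failures;
        unsigned retryCount; // bumped on each reoptimization, capped at 18

        bool shouldReoptimizeNow() const
        {
            unsigned ratio = 6;                      // desiredSuccessFailRatio()
            unsigned threshold = 20u << retryCount;  // largeFailCountThreshold()
            // Reoptimize when failures are frequent relative to successes
            // and numerous in absolute terms; the absolute bar doubles with
            // each retry to avoid compile thrashing.
            return ratio * failures >= successes && failures >= threshold;
        }
    };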

* CMakeLists.txt:
* GNUmakefile.list.am:
* JavaScriptCore.pro:
* JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
* JavaScriptCore.vcproj/WTF/WTF.vcproj:
* JavaScriptCore.xcodeproj/project.pbxproj:
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::CodeBlock):
(JSC::ProgramCodeBlock::jettison):
(JSC::EvalCodeBlock::jettison):
(JSC::FunctionCodeBlock::jettison):
(JSC::CodeBlock::shouldOptimizeNow):
(JSC::CodeBlock::dumpValueProfiles):
* bytecode/CodeBlock.h:
* dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::getStrongPrediction):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::exitSpeculativeWithOSR):
(JSC::DFG::JITCompiler::compileEntry):
(JSC::DFG::JITCompiler::compileBody):
* dfg/DFGJITCompiler.h:
(JSC::DFG::JITCompiler::noticeOSREntry):
* dfg/DFGOSREntry.cpp:
(JSC::DFG::prepareOSREntry):
* dfg/DFGOSREntry.h:
(JSC::DFG::getOSREntryDataBytecodeIndex):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::compile):
* heap/ConservativeRoots.cpp:
(JSC::ConservativeRoots::ConservativeRoots):
(JSC::ConservativeRoots::~ConservativeRoots):
(JSC::DummyMarkHook::mark):
(JSC::ConservativeRoots::genericAddPointer):
(JSC::ConservativeRoots::genericAddSpan):
(JSC::ConservativeRoots::add):
* heap/ConservativeRoots.h:
* heap/Heap.cpp:
(JSC::Heap::addJettisonedCodeBlock):
(JSC::Heap::markRoots):
* heap/Heap.h:
* heap/JettisonedCodeBlocks.cpp: Added.
(JSC::JettisonedCodeBlocks::JettisonedCodeBlocks):
(JSC::JettisonedCodeBlocks::~JettisonedCodeBlocks):
(JSC::JettisonedCodeBlocks::addCodeBlock):
(JSC::JettisonedCodeBlocks::clearMarks):
(JSC::JettisonedCodeBlocks::deleteUnmarkedCodeBlocks):
(JSC::JettisonedCodeBlocks::traceCodeBlocks):
* heap/JettisonedCodeBlocks.h: Added.
(JSC::JettisonedCodeBlocks::mark):
* interpreter/RegisterFile.cpp:
(JSC::RegisterFile::gatherConservativeRoots):
* interpreter/RegisterFile.h:
* jit/JITStubs.cpp:
(JSC::DEFINE_STUB_FUNCTION):
* runtime/Executable.cpp:
(JSC::jettisonCodeBlock):
(JSC::EvalExecutable::jettisonOptimizedCode):
(JSC::ProgramExecutable::jettisonOptimizedCode):
(JSC::FunctionExecutable::jettisonOptimizedCodeForCall):
(JSC::FunctionExecutable::jettisonOptimizedCodeForConstruct):
* runtime/Executable.h:
(JSC::FunctionExecutable::jettisonOptimizedCodeFor):
* wtf/BitVector.h: Added.
(WTF::BitVector::BitVector):
(WTF::BitVector::~BitVector):
(WTF::BitVector::operator=):
(WTF::BitVector::size):
(WTF::BitVector::ensureSize):
(WTF::BitVector::resize):
(WTF::BitVector::clearAll):
(WTF::BitVector::get):
(WTF::BitVector::set):
(WTF::BitVector::clear):
(WTF::BitVector::bitsInPointer):
(WTF::BitVector::maxInlineBits):
(WTF::BitVector::byteCount):
(WTF::BitVector::makeInlineBits):
(WTF::BitVector::OutOfLineBits::numBits):
(WTF::BitVector::OutOfLineBits::numWords):
(WTF::BitVector::OutOfLineBits::bits):
(WTF::BitVector::OutOfLineBits::create):
(WTF::BitVector::OutOfLineBits::destroy):
(WTF::BitVector::OutOfLineBits::OutOfLineBits):
(WTF::BitVector::isInline):
(WTF::BitVector::outOfLineBits):
(WTF::BitVector::resizeOutOfLine):
(WTF::BitVector::bits):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@95681 268f45cc-cd09-0410-ab3c-d52691b4dbfc

27 files changed:
Source/JavaScriptCore/CMakeLists.txt
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/GNUmakefile.list.am
Source/JavaScriptCore/JavaScriptCore.pro
Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj
Source/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj
Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
Source/JavaScriptCore/bytecode/CodeBlock.cpp
Source/JavaScriptCore/bytecode/CodeBlock.h
Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
Source/JavaScriptCore/dfg/DFGJITCompiler.h
Source/JavaScriptCore/dfg/DFGOSREntry.cpp
Source/JavaScriptCore/dfg/DFGOSREntry.h
Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
Source/JavaScriptCore/heap/ConservativeRoots.cpp
Source/JavaScriptCore/heap/ConservativeRoots.h
Source/JavaScriptCore/heap/Heap.cpp
Source/JavaScriptCore/heap/Heap.h
Source/JavaScriptCore/heap/JettisonedCodeBlocks.cpp [new file with mode: 0644]
Source/JavaScriptCore/heap/JettisonedCodeBlocks.h [new file with mode: 0644]
Source/JavaScriptCore/interpreter/RegisterFile.cpp
Source/JavaScriptCore/interpreter/RegisterFile.h
Source/JavaScriptCore/jit/JITStubs.cpp
Source/JavaScriptCore/runtime/Executable.cpp
Source/JavaScriptCore/runtime/Executable.h
Source/JavaScriptCore/wtf/BitVector.h [new file with mode: 0644]

index fc526b6490b988592fadb6f738c83fe952bba474..afec1cd25bde2d8a706cf1a174a92f23bc325645 100644 (file)
@@ -49,6 +49,7 @@ SET(JavaScriptCore_SOURCES
     heap/Heap.cpp
     heap/HandleHeap.cpp
     heap/HandleStack.cpp
+    heap/JettisonedCodeBlocks.cpp
     heap/MachineStackMarker.cpp
     heap/MarkedBlock.cpp
     heap/MarkedSpace.cpp
index 6c5cde78125ba34c5da5b59430f041bb6adfe3ae..26a8ec79ee0a274748ee32de8af1794c03feaf46 100644 (file)
@@ -1,3 +1,102 @@
+2011-09-21  Filip Pizlo  <fpizlo@apple.com>
+
+        DFG should support continuous optimization
+        https://bugs.webkit.org/show_bug.cgi?id=68329
+
+        Reviewed by Geoffrey Garen.
+        
+        This adds the ability to reoptimize a code block if speculation
+        failures happen frequently. 6% speed-up on Kraken, 1% slow-down
+        on V8, neutral on SunSpider.
+
+        * CMakeLists.txt:
+        * GNUmakefile.list.am:
+        * JavaScriptCore.pro:
+        * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+        * JavaScriptCore.vcproj/WTF/WTF.vcproj:
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * bytecode/CodeBlock.cpp:
+        (JSC::CodeBlock::CodeBlock):
+        (JSC::ProgramCodeBlock::jettison):
+        (JSC::EvalCodeBlock::jettison):
+        (JSC::FunctionCodeBlock::jettison):
+        (JSC::CodeBlock::shouldOptimizeNow):
+        (JSC::CodeBlock::dumpValueProfiles):
+        * bytecode/CodeBlock.h:
+        * dfg/DFGByteCodeParser.cpp:
+        (JSC::DFG::ByteCodeParser::getStrongPrediction):
+        * dfg/DFGJITCompiler.cpp:
+        (JSC::DFG::JITCompiler::exitSpeculativeWithOSR):
+        (JSC::DFG::JITCompiler::compileEntry):
+        (JSC::DFG::JITCompiler::compileBody):
+        * dfg/DFGJITCompiler.h:
+        (JSC::DFG::JITCompiler::noticeOSREntry):
+        * dfg/DFGOSREntry.cpp:
+        (JSC::DFG::prepareOSREntry):
+        * dfg/DFGOSREntry.h:
+        (JSC::DFG::getOSREntryDataBytecodeIndex):
+        * dfg/DFGSpeculativeJIT.cpp:
+        (JSC::DFG::SpeculativeJIT::compile):
+        * heap/ConservativeRoots.cpp:
+        (JSC::ConservativeRoots::ConservativeRoots):
+        (JSC::ConservativeRoots::~ConservativeRoots):
+        (JSC::DummyMarkHook::mark):
+        (JSC::ConservativeRoots::genericAddPointer):
+        (JSC::ConservativeRoots::genericAddSpan):
+        (JSC::ConservativeRoots::add):
+        * heap/ConservativeRoots.h:
+        * heap/Heap.cpp:
+        (JSC::Heap::addJettisonedCodeBlock):
+        (JSC::Heap::markRoots):
+        * heap/Heap.h:
+        * heap/JettisonedCodeBlocks.cpp: Added.
+        (JSC::JettisonedCodeBlocks::JettisonedCodeBlocks):
+        (JSC::JettisonedCodeBlocks::~JettisonedCodeBlocks):
+        (JSC::JettisonedCodeBlocks::addCodeBlock):
+        (JSC::JettisonedCodeBlocks::clearMarks):
+        (JSC::JettisonedCodeBlocks::deleteUnmarkedCodeBlocks):
+        (JSC::JettisonedCodeBlocks::traceCodeBlocks):
+        * heap/JettisonedCodeBlocks.h: Added.
+        (JSC::JettisonedCodeBlocks::mark):
+        * interpreter/RegisterFile.cpp:
+        (JSC::RegisterFile::gatherConservativeRoots):
+        * interpreter/RegisterFile.h:
+        * jit/JITStubs.cpp:
+        (JSC::DEFINE_STUB_FUNCTION):
+        * runtime/Executable.cpp:
+        (JSC::jettisonCodeBlock):
+        (JSC::EvalExecutable::jettisonOptimizedCode):
+        (JSC::ProgramExecutable::jettisonOptimizedCode):
+        (JSC::FunctionExecutable::jettisonOptimizedCodeForCall):
+        (JSC::FunctionExecutable::jettisonOptimizedCodeForConstruct):
+        * runtime/Executable.h:
+        (JSC::FunctionExecutable::jettisonOptimizedCodeFor):
+        * wtf/BitVector.h: Added.
+        (WTF::BitVector::BitVector):
+        (WTF::BitVector::~BitVector):
+        (WTF::BitVector::operator=):
+        (WTF::BitVector::size):
+        (WTF::BitVector::ensureSize):
+        (WTF::BitVector::resize):
+        (WTF::BitVector::clearAll):
+        (WTF::BitVector::get):
+        (WTF::BitVector::set):
+        (WTF::BitVector::clear):
+        (WTF::BitVector::bitsInPointer):
+        (WTF::BitVector::maxInlineBits):
+        (WTF::BitVector::byteCount):
+        (WTF::BitVector::makeInlineBits):
+        (WTF::BitVector::OutOfLineBits::numBits):
+        (WTF::BitVector::OutOfLineBits::numWords):
+        (WTF::BitVector::OutOfLineBits::bits):
+        (WTF::BitVector::OutOfLineBits::create):
+        (WTF::BitVector::OutOfLineBits::destroy):
+        (WTF::BitVector::OutOfLineBits::OutOfLineBits):
+        (WTF::BitVector::isInline):
+        (WTF::BitVector::outOfLineBits):
+        (WTF::BitVector::resizeOutOfLine):
+        (WTF::BitVector::bits):
+
 2011-09-21  Gavin Barraclough  <barraclough@apple.com>
 
         Should support value profiling on CPU(X86)
index be74c9b23186fa9bab20005abd64d917fd83bda2..9d119dfb53c2ff335a1216f101aa91981947a1df 100644 (file)
@@ -140,6 +140,8 @@ javascriptcore_sources += \
        Source/JavaScriptCore/heap/Handle.h \
        Source/JavaScriptCore/heap/HandleHeap.cpp \
        Source/JavaScriptCore/heap/HandleHeap.h \
+       Source/JavaScriptCore/heap/JettisonedCodeBlocks.cpp \
+       Source/JavaScriptCore/heap/JettisonedCodeBlocks.h \
        Source/JavaScriptCore/heap/SlotVisitor.h \
        Source/JavaScriptCore/heap/HandleStack.cpp \
        Source/JavaScriptCore/heap/HandleStack.h \
@@ -464,6 +466,7 @@ javascriptcore_sources += \
        Source/JavaScriptCore/wtf/Assertions.h \
        Source/JavaScriptCore/wtf/Atomics.h \
        Source/JavaScriptCore/wtf/AVLTree.h \
+       Source/JavaScriptCore/wtf/BitVector.h \
        Source/JavaScriptCore/wtf/Bitmap.h \
        Source/JavaScriptCore/wtf/BlockStack.h \
        Source/JavaScriptCore/wtf/BloomFilter.h \
index c06e2f97d484cccb5f714fc5902dbc9ffa375176..3db7060b608203091206d61542dabac5e00526d5 100644 (file)
@@ -77,6 +77,7 @@ SOURCES += \
     heap/HandleHeap.cpp \
     heap/HandleStack.cpp \
     heap/Heap.cpp \
+    heap/JettisonedCodeBlocks.cpp \
     heap/MachineStackMarker.cpp \
     heap/MarkStack.cpp \
     heap/MarkedBlock.cpp \
index 3b30834e2dd7db5ec53eda0e7c470536ca09aa70..84bd2547fa33c478bea81ceea1216d5901818814 100644 (file)
                                     RelativePath="..\..\heap\HandleStack.h"
                                     >
                             </File>
+                            <File
+                                    RelativePath="..\..\heap\JettisonedCodeBlocks.cpp"
+                                    >
+                            </File>
+                            <File
+                                    RelativePath="..\..\heap\JettisonedCodeBlocks.h"
+                                    >
+                            </File>
                             <File
                                     RelativePath="..\..\heap\Local.h"
                                     >
index c829d8345c4a4a22d04cf40d46a6a4b333d2ca9d..a1b1d57dbfc430b558922311167d77f2eafd877e 100644 (file)
                        RelativePath="..\..\wtf\AVLTree.h"
                        >
                </File>
+               <File
+                       RelativePath="..\..\wtf\BitVector.h"
+                       >
+               </File>
                <File
                        RelativePath="..\..\wtf\Bitmap.h"
                        >
index c6d7f3c7d22da4c0512b261499e75c1ffaad84b2..38f970ca50f680221e77b8b42f4d45152af491b5 100644 (file)
@@ -74,6 +74,9 @@
                0FD82E86141F3FF100179C94 /* PredictedType.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FD82E84141F3FDA00179C94 /* PredictedType.cpp */; };
                0FD82E9014207A5F00179C94 /* ValueProfile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FD82E8E14207A5100179C94 /* ValueProfile.cpp */; };
                0FD82EF51423075B00179C94 /* DFGIntrinsic.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD82EF31423073900179C94 /* DFGIntrinsic.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               0FD82F2B1426CA6D00179C94 /* JettisonedCodeBlocks.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD82F291426CA5A00179C94 /* JettisonedCodeBlocks.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               0FD82F2C1426CA7400179C94 /* JettisonedCodeBlocks.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FD82F281426CA5A00179C94 /* JettisonedCodeBlocks.cpp */; };
+               0FD82F4B142806A100179C94 /* BitVector.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD82F491428069200179C94 /* BitVector.h */; settings = {ATTRIBUTES = (Private, ); }; };
                1400067712A6F7830064D123 /* OSAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 1400067612A6F7830064D123 /* OSAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
                1400069312A6F9E10064D123 /* OSAllocatorPosix.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1400069212A6F9E10064D123 /* OSAllocatorPosix.cpp */; };
                140566C4107EC255005DBC8D /* JSAPIValueWrapper.cpp in Sources */ = {isa = PBXBuildFile; fileRef = BC0894D50FAFBA2D00001865 /* JSAPIValueWrapper.cpp */; };
                0FD82E84141F3FDA00179C94 /* PredictedType.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = PredictedType.cpp; sourceTree = "<group>"; };
                0FD82E8E14207A5100179C94 /* ValueProfile.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ValueProfile.cpp; sourceTree = "<group>"; };
                0FD82EF31423073900179C94 /* DFGIntrinsic.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGIntrinsic.h; path = dfg/DFGIntrinsic.h; sourceTree = "<group>"; };
+               0FD82F281426CA5A00179C94 /* JettisonedCodeBlocks.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JettisonedCodeBlocks.cpp; sourceTree = "<group>"; };
+               0FD82F291426CA5A00179C94 /* JettisonedCodeBlocks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JettisonedCodeBlocks.h; sourceTree = "<group>"; };
+               0FD82F491428069200179C94 /* BitVector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BitVector.h; sourceTree = "<group>"; };
                1400067612A6F7830064D123 /* OSAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OSAllocator.h; sourceTree = "<group>"; };
                1400069212A6F9E10064D123 /* OSAllocatorPosix.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = OSAllocatorPosix.cpp; sourceTree = "<group>"; };
                140D17D60E8AD4A9000CD17D /* JSBasePrivate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSBasePrivate.h; sourceTree = "<group>"; };
                142E312A134FF0A600AFADB5 /* heap */ = {
                        isa = PBXGroup;
                        children = (
+                               0FD82F281426CA5A00179C94 /* JettisonedCodeBlocks.cpp */,
+                               0FD82F291426CA5A00179C94 /* JettisonedCodeBlocks.h */,
                                A70456AF1427FB150037DA68 /* AllocationSpace.h */,
                                A70456AE1427FB030037DA68 /* AllocationSpace.cpp */,
                                0F242DA513F3B1BB007ADD4C /* WeakReferenceHarvester.h */,
                65162EF108E6A21C007556CD /* wtf */ = {
                        isa = PBXGroup;
                        children = (
+                               0FD82F491428069200179C94 /* BitVector.h */,
                                C22C524813FAF6EF00B7DC0D /* dtoa */,
                                06D358A00DAAD9C4003B174E /* mac */,
                                8656573E115BE35200291F40 /* text */,
                                BC257DE80E1F51C50016B6C9 /* Arguments.h in Headers */,
                                86D3B2C410156BDE002865E7 /* ARMAssembler.h in Headers */,
                                86ADD1450FDDEA980006EEC2 /* ARMv7Assembler.h in Headers */,
+                               0FD82F4B142806A100179C94 /* BitVector.h in Headers */,
+                               0FD82F2B1426CA6D00179C94 /* JettisonedCodeBlocks.h in Headers */,
                                0FD82EF51423075B00179C94 /* DFGIntrinsic.h in Headers */,
                                0FD82E85141F3FE300179C94 /* BoundsCheckedPointer.h in Headers */,
                                0FD82E57141DAF1000179C94 /* DFGOSREntry.h in Headers */,
                        isa = PBXSourcesBuildPhase;
                        buildActionMask = 2147483647;
                        files = (
+                               0FD82F2C1426CA7400179C94 /* JettisonedCodeBlocks.cpp in Sources */,
                                0FD82E9014207A5F00179C94 /* ValueProfile.cpp in Sources */,
                                0FD82E86141F3FF100179C94 /* PredictedType.cpp in Sources */,
                                0FD82E56141DAF0800179C94 /* DFGOSREntry.cpp in Sources */,
index 231ad2f03e696a4af8a0e9e3a7d265a9687f8d2b..655f96110090ee85641f6e97c2b0f88223e53937 100644 (file)
@@ -1428,7 +1428,10 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlo
     , m_sourceOffset(sourceOffset)
     , m_symbolTable(symTab)
     , m_alternative(alternative)
+    , m_speculativeSuccessCounter(0)
+    , m_speculativeFailCounter(0)
     , m_optimizationDelayCounter(0)
+    , m_reoptimizationRetryCounter(0)
 {
     ASSERT(m_source);
     
@@ -1935,6 +1938,27 @@ bool FunctionCodeBlock::canCompileWithDFG()
         return DFG::canCompileFunctionForConstruct(this);
     return DFG::canCompileFunctionForCall(this);
 }
+
+void ProgramCodeBlock::jettison(JSGlobalData& globalData)
+{
+    ASSERT(getJITType() != JITCode::BaselineJIT);
+    ASSERT(this == replacement());
+    static_cast<ProgramExecutable*>(ownerExecutable())->jettisonOptimizedCode(globalData);
+}
+
+void EvalCodeBlock::jettison(JSGlobalData& globalData)
+{
+    ASSERT(getJITType() != JITCode::BaselineJIT);
+    ASSERT(this == replacement());
+    static_cast<EvalExecutable*>(ownerExecutable())->jettisonOptimizedCode(globalData);
+}
+
+void FunctionCodeBlock::jettison(JSGlobalData& globalData)
+{
+    ASSERT(getJITType() != JITCode::BaselineJIT);
+    ASSERT(this == replacement());
+    static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(globalData, m_isConstructor ? CodeForConstruct : CodeForCall);
+}
 #endif
 
 #if ENABLE(VALUE_PROFILER)
@@ -1974,8 +1998,8 @@ bool CodeBlock::shouldOptimizeNow()
     printf("Profile hotness: %lf, %lf\n", (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles, (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles());
 #endif
 
-    if ((double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles >= 0.75
-        && (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles() >= 0.5)
+    if ((!numberOfNonArgumentValueProfiles || (double)numberOfLiveNonArgumentValueProfiles / numberOfNonArgumentValueProfiles >= 0.75)
+        && (!numberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles() >= 0.5))
         return true;
     
     m_optimizationDelayCounter++;
@@ -2017,6 +2041,11 @@ void CodeBlock::dumpValueProfiles()
         RareCaseProfile* profile = rareCaseProfile(i);
         fprintf(stderr, "   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
     }
+    fprintf(stderr, "SpecialFastCaseProfile for %p:\n", this);
+    for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
+        RareCaseProfile* profile = specialFastCaseProfile(i);
+        fprintf(stderr, "   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
+    }
 }
 #endif
 
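
The reworked condition in shouldOptimizeNow() keeps the same 75%-fullness and
50%-density gate, but each ratio test now treats an empty denominator as
trivially satisfied; previously 0/0 produced a NaN whose comparison always
failed, so code blocks with no value profiles could never pass. Restated as a
standalone sketch (function and parameter names are illustrative):

    // Sketch of the guarded profiling gate from shouldOptimizeNow().
    bool profilesLookReady(unsigned liveProfiles, unsigned profiles,
                           double samples, double bucketsPerProfile,
                           unsigned allProfiles)
    {
        bool fullEnough = !profiles
            || static_cast<double>(liveProfiles) / profiles >= 0.75;
        bool denseEnough = !allProfiles
            || samples / bucketsPerProfile / allProfiles >= 0.5;
        return fullEnough && denseEnough;
    }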
index d8bcd0bea0d69cbe398a721ace2e41d114a5ee65..5a577d0f31687da139c93289bfb0634c779fd8c5 100644 (file)
@@ -31,6 +31,7 @@
 #define CodeBlock_h
 
 #include "CompactJITCodeMap.h"
+#include "DFGOSREntry.h"
 #include "EvalCodeCache.h"
 #include "Instruction.h"
 #include "JITCode.h"
@@ -333,6 +334,21 @@ namespace JSC {
         {
             return m_jitCodeMap.get();
         }
+        
+        DFG::OSREntryData* appendDFGOSREntryData(unsigned bytecodeIndex, unsigned machineCodeOffset)
+        {
+            DFG::OSREntryData entry;
+            entry.m_bytecodeIndex = bytecodeIndex;
+            entry.m_machineCodeOffset = machineCodeOffset;
+            m_dfgOSREntry.append(entry);
+            return &m_dfgOSREntry.last();
+        }
+        unsigned numberOfDFGOSREntries() const { return m_dfgOSREntry.size(); }
+        DFG::OSREntryData* dfgOSREntryData(unsigned i) { return &m_dfgOSREntry[i]; }
+        DFG::OSREntryData* dfgOSREntryDataForBytecodeIndex(unsigned bytecodeIndex)
+        {
+            return binarySearch<DFG::OSREntryData, unsigned, DFG::getOSREntryDataBytecodeIndex>(m_dfgOSREntry.begin(), m_dfgOSREntry.size(), bytecodeIndex);
+        }
 #endif
 
 #if ENABLE(INTERPRETER)
@@ -360,9 +376,11 @@ namespace JSC {
             m_jitCodeWithArityCheck = codeWithArityCheck;
         }
         JITCode& getJITCode() { return m_jitCode; }
+        MacroAssemblerCodePtr getJITCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
         JITCode::JITType getJITType() { return m_jitCode.jitType(); }
         ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
+        virtual void jettison(JSGlobalData&) = 0;
         virtual CodeBlock* replacement() = 0;
         virtual bool canCompileWithDFG() = 0;
         bool hasOptimizedReplacement()
@@ -685,11 +703,37 @@ namespace JSC {
         // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
         // as this is called from the CodeBlock constructor.
         
+        // When we observe a lot of speculation failures, we trigger a
+        // reoptimization. But each time, we increase the optimization trigger
+        // to avoid thrashing.
+        unsigned reoptimizationRetryCounter() const
+        {
+            ASSERT(m_reoptimizationRetryCounter <= 18);
+            return m_reoptimizationRetryCounter;
+        }
+        
+        void countReoptimization()
+        {
+            m_reoptimizationRetryCounter++;
+            if (m_reoptimizationRetryCounter > 18)
+                m_reoptimizationRetryCounter = 18;
+        }
+        
         // These functions are provided to support calling
-        // optimizeAfterWarmUp() from JIT-generated code.
+        // optimizeXYZ() methods from JIT-generated code.
+        static int32_t counterValueForOptimizeNextInvocation()
+        {
+            return 0;
+        }
+        
         int32_t counterValueForOptimizeAfterWarmUp()
         {
-            return -1000;
+            return -1000 << reoptimizationRetryCounter();
+        }
+        
+        int32_t counterValueForOptimizeAfterLongWarmUp()
+        {
+            return -5000 << reoptimizationRetryCounter();
         }
         
         int32_t* addressOfExecuteCounter()
@@ -697,12 +741,18 @@ namespace JSC {
             return &m_executeCounter;
         }
         
+        static ptrdiff_t offsetOfExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_executeCounter); }
+
+        int32_t executeCounter() const { return m_executeCounter; }
+        
+        unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }
+        
         // Call this to force the next optimization trigger to fire. This is
         // rarely wise, since optimization triggers are typically more
         // expensive than executing baseline code.
         void optimizeNextInvocation()
         {
-            m_executeCounter = 0;
+            m_executeCounter = counterValueForOptimizeNextInvocation();
         }
         
         // Call this to prevent optimization from happening again. Note that
@@ -726,6 +776,13 @@ namespace JSC {
             m_executeCounter = counterValueForOptimizeAfterWarmUp();
         }
         
+        // Call this to force an optimization trigger to fire only after
+        // a lot of warm-up.
+        void optimizeAfterLongWarmUp()
+        {
+            m_executeCounter = counterValueForOptimizeAfterLongWarmUp();
+        }
+        
         // Call this to cause an optimization trigger to fire soon, but
         // not necessarily the next one. This makes sense if optimization
         // succeeds. Successful optimization means that all calls are
@@ -746,18 +803,72 @@ namespace JSC {
         // in the baseline code.
         void optimizeSoon()
         {
-            m_executeCounter = -100;
+            m_executeCounter = -100 << reoptimizationRetryCounter();
+        }
+        
+        // The speculative JIT tracks its success rate, so that we can
+        // decide when to reoptimize. It's interesting to note that these
+        // counters may overflow without any protection. The success
+        // counter will overflow before the fail one does, because the
+        // fail one is used as a trigger to reoptimize. So the worst case
+        // is that the success counter overflows and we reoptimize without
+        // needing to. But this is harmless. If a method really did
+        // execute 2^32 times then compiling it again probably won't hurt
+        // anyone.
+        
+        void countSpeculationSuccess()
+        {
+            m_speculativeSuccessCounter++;
         }
         
+        void countSpeculationFailure()
+        {
+            m_speculativeFailCounter++;
+        }
+        
+        uint32_t speculativeSuccessCounter() const { return m_speculativeSuccessCounter; }
+        uint32_t speculativeFailCounter() const { return m_speculativeFailCounter; }
+        
+        uint32_t* addressOfSpeculativeSuccessCounter() { return &m_speculativeSuccessCounter; }
+        uint32_t* addressOfSpeculativeFailCounter() { return &m_speculativeFailCounter; }
+        
+        static ptrdiff_t offsetOfSpeculativeSuccessCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeSuccessCounter); }
+        static ptrdiff_t offsetOfSpeculativeFailCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeFailCounter); }
+        
         // The amount by which the JIT will increment m_executeCounter.
         static unsigned executeCounterIncrementForLoop() { return 1; }
         static unsigned executeCounterIncrementForReturn() { return 15; }
+
+        // The success/failure ratio we want.
+        unsigned desiredSuccessFailRatio() { return 6; }
+        
+        // The number of failures that triggers the use of the ratio.
+        unsigned largeFailCountThreshold() { return 20 << alternative()->reoptimizationRetryCounter(); }
+        unsigned largeFailCountThresholdForLoop() { return 1 << alternative()->reoptimizationRetryCounter(); }
+        
+        bool shouldReoptimizeNow()
+        {
+            return desiredSuccessFailRatio() * speculativeFailCounter() >= speculativeSuccessCounter() && speculativeFailCounter() >= largeFailCountThreshold();
+        }
+        
+        bool shouldReoptimizeFromLoopNow()
+        {
+            return desiredSuccessFailRatio() * speculativeFailCounter() >= speculativeSuccessCounter() && speculativeFailCounter() >= largeFailCountThresholdForLoop();
+        }
         
 #if ENABLE(VALUE_PROFILER)
         bool shouldOptimizeNow();
 #else
         bool shouldOptimizeNow() { return false; }
 #endif
+        
+        void reoptimize(JSGlobalData& globalData)
+        {
+            ASSERT(replacement() != this);
+            replacement()->jettison(globalData);
+            countReoptimization();
+            optimizeAfterWarmUp();
+        }
 
 #if ENABLE(VERBOSE_VALUE_PROFILE)
         void dumpValueProfiles();
@@ -827,6 +938,7 @@ namespace JSC {
 #endif
 #if ENABLE(DFG_JIT)
         OwnPtr<CompactJITCodeMap> m_jitCodeMap;
+        Vector<DFG::OSREntryData> m_dfgOSREntry;
 #endif
 #if ENABLE(VALUE_PROFILER)
         SegmentedVector<ValueProfile, 8> m_valueProfiles;
@@ -851,7 +963,10 @@ namespace JSC {
         OwnPtr<PredictionTracker> m_predictions;
 
         int32_t m_executeCounter;
+        uint32_t m_speculativeSuccessCounter;
+        uint32_t m_speculativeFailCounter;
         uint8_t m_optimizationDelayCounter;
+        uint8_t m_reoptimizationRetryCounter;
 
         struct RareData {
            WTF_MAKE_FAST_ALLOCATED;
@@ -909,6 +1024,7 @@ namespace JSC {
 #if ENABLE(JIT)
     protected:
         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+        virtual void jettison(JSGlobalData&);
         virtual CodeBlock* replacement();
         virtual bool canCompileWithDFG();
 #endif
@@ -935,6 +1051,7 @@ namespace JSC {
 #if ENABLE(JIT)
     protected:
         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+        virtual void jettison(JSGlobalData&);
         virtual CodeBlock* replacement();
         virtual bool canCompileWithDFG();
 #endif
@@ -962,6 +1079,7 @@ namespace JSC {
 #if ENABLE(JIT)
     protected:
         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+        virtual void jettison(JSGlobalData&);
         virtual CodeBlock* replacement();
         virtual bool canCompileWithDFG();
 #endif
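
Every optimization trigger value above is scaled by the retry counter, so
repeated reoptimizations of the same block back off exponentially. An
illustration of the resulting execute-counter values (written with the shift
on the positive magnitude; the patch shifts the negative literal directly,
which is equivalent on the targeted compilers):

    // How trigger values grow with the reoptimization retry counter.
    int32_t afterWarmUp(unsigned retry)     { return -(1000 << retry); }
    int32_t afterLongWarmUp(unsigned retry) { return -(5000 << retry); }
    int32_t soon(unsigned retry)            { return -(100 << retry); }
    // retry 0: -1000 / -5000 / -100
    // retry 1: -2000 / -10000 / -200
    // retry 2: -4000 / -20000 / -400  ... saturating at retry 18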
index 74d57ccb75a17fc2a51f5443286d288568888fc0..2f2e0904acfbfeb9eb1f0f75e469b19f0a8ffda5 100644 (file)
@@ -492,7 +492,7 @@ private:
         ASSERT(profile);
         PredictedType prediction = profile->computeUpdatedPrediction();
 #if ENABLE(DFG_DEBUG_VERBOSE)
-        printf("Dynamic [%u, %u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
+        printf("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
 #endif
         return prediction;
     }
index 6d5c2f985767891723b563457670049e5f3f1a9f..52d3157ddb520b36d676b93f16a14a3c3e851e4d 100644 (file)
@@ -408,6 +408,9 @@ void JITCompiler::exitSpeculativeWithOSR(const OSRExit& exit, SpeculationRecover
     //         entry, since both forms of OSR are expensive. OSR entry is
     //         particularly expensive.
     //
+    //     (d) Frequent OSR failures, even those that do not result in the code
+    //         running in a hot loop, result in recompilation getting triggered.
+    //
     //     To ensure (c), we'd like to set the execute counter to
     //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
     //     (a) and (b), since then every OSR exit would delay the opportunity for
@@ -415,11 +418,40 @@ void JITCompiler::exitSpeculativeWithOSR(const OSRExit& exit, SpeculationRecover
     //     frequently and the function has few loops, then the counter will never
     //     become non-negative and OSR entry will never be triggered. OSR entry
     //     will only happen if a loop gets hot in the old JIT, which does a pretty
-    //     good job of ensuring (a) and (b). This heuristic may need to be
-    //     rethought in the future, particularly if we support reoptimizing code
-    //     with new value profiles gathered from code that did OSR exit.
+    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
+    //     since each speculation failure would reset the execute counter.
+    //     So we check here if the number of speculation failures is significantly
+    //     larger than the number of successes (we want a 6:1 success/fail ratio), and if
+    //     there have been a large enough number of failures. If so, we set the
+    //     counter to 0; otherwise we set the counter to
+    //     counterValueForOptimizeAfterLongWarmUp().
+    
+    move(TrustedImmPtr(codeBlock()), GPRInfo::regT0);
+    
+    load32(Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
+    load32(Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
+    add32(Imm32(1), GPRInfo::regT2);
+    add32(Imm32(-1), GPRInfo::regT1);
+    store32(GPRInfo::regT2, Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
+    store32(GPRInfo::regT1, Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
+    
+    move(TrustedImmPtr(codeBlock()->alternative()), GPRInfo::regT0);
+    
+    Jump fewFails = branch32(BelowOrEqual, GPRInfo::regT2, Imm32(codeBlock()->largeFailCountThreshold()));
+    mul32(Imm32(codeBlock()->desiredSuccessFailRatio()), GPRInfo::regT2, GPRInfo::regT2);
+    
+    Jump lowFailRate = branch32(BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
     
-    store32(Imm32(codeBlock()->alternative()->counterValueForOptimizeAfterWarmUp()), codeBlock()->alternative()->addressOfExecuteCounter());
+    // Reoptimize as soon as possible.
+    store32(Imm32(CodeBlock::counterValueForOptimizeNextInvocation()), Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
+    Jump doneAdjusting = jump();
+    
+    fewFails.link(this);
+    lowFailRate.link(this);
+    
+    store32(Imm32(codeBlock()->alternative()->counterValueForOptimizeAfterLongWarmUp()), Address(GPRInfo::regT0, CodeBlock::offsetOfExecuteCounter()));
+    
+    doneAdjusting.link(this);
     
     // 12) Load the result of the last bytecode operation into regT0.
     
@@ -481,6 +513,8 @@ void JITCompiler::compileEntry()
     // both normal return code and when jumping to an exception handler).
     preserveReturnAddressAfterCall(GPRInfo::regT2);
     emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
+    
+    addPtr(Imm32(1), AbsoluteAddress(codeBlock()->addressOfSpeculativeSuccessCounter()));
 }
 
 void JITCompiler::compileBody()
@@ -498,10 +532,6 @@ void JITCompiler::compileBody()
     bool compiledSpeculative = speculative.compile();
     ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
 
-#if ENABLE(DFG_OSR_ENTRY)
-    m_codeBlock->setJITCodeMap(m_jitCodeMapEncoder.finish());
-#endif
-    
     linkOSRExits(speculative);
 
     // Iterate over the m_calls vector, checking for exception checks,
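
Taken together with the unconditional increment in compileEntry(), the
counters balance out: every entry into DFG code optimistically counts as a
success, and the OSR-exit path both records a failure and takes that
provisional success back. The emitted exit-path accounting is roughly
equivalent to this C++ (a sketch; the struct and function names are
illustrative stand-ins for CodeBlock's private counters):

    // What the generated OSR-exit accounting computes, restated in C++.
    #include <cstdint>

    struct Counters { uint32_t success; uint32_t fail; };

    int32_t executeCounterAfterExit(Counters& c, uint32_t failThreshold,
                                    uint32_t ratio, int32_t longWarmUpValue)
    {
        c.fail++;    // this exit is a speculation failure...
        c.success--; // ...and cancels the success counted on entry
        if (c.fail > failThreshold && ratio * c.fail > c.success)
            return 0;            // reoptimize on the next invocation
        return longWarmUpValue;  // otherwise demand a long warm-up first
    }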
index c6e1d36ffbe07559ad80324ea649082b3e8a7686..90fa68a61f6de10f831a2523cc2073d1188cf52f 100644 (file)
@@ -320,7 +320,46 @@ public:
     
     void noticeOSREntry(BasicBlock& basicBlock)
     {
-        m_jitCodeMapEncoder.append(basicBlock.bytecodeBegin, differenceBetween(m_startOfCode, label()));
+#if ENABLE(DFG_OSR_ENTRY)
+        OSREntryData* entry = codeBlock()->appendDFGOSREntryData(basicBlock.bytecodeBegin, differenceBetween(m_startOfCode, label()));
+        
+        unsigned lastLiveArgument = 0;
+        unsigned lastLiveLocal = 0;
+        
+        for (unsigned i = 0; i < basicBlock.m_arguments.size(); ++i) {
+            if (basicBlock.m_arguments[i].value != NoNode)
+                lastLiveArgument = i;
+        }
+        
+        for (unsigned i = 0; i < basicBlock.m_locals.size(); ++i) {
+            if (basicBlock.m_locals[i].value != NoNode)
+                lastLiveLocal = i;
+        }
+        
+        if (lastLiveArgument) {
+            entry->m_liveArguments.resize(lastLiveArgument + 1);
+            entry->m_liveArguments.clearAll();
+            
+            for (unsigned i = 0; i <= lastLiveArgument; ++i) {
+                if (basicBlock.m_arguments[i].value != NoNode)
+                    entry->m_liveArguments.set(i);
+            }
+        } else
+            entry->m_liveArguments.clearAll();
+        
+        if (lastLiveLocal) {
+            entry->m_liveVariables.resize(lastLiveLocal + 1);
+            entry->m_liveVariables.clearAll();
+            
+            for (unsigned i = 0; i <= lastLiveLocal; ++i) {
+                if (basicBlock.m_locals[i].value != NoNode)
+                    entry->m_liveVariables.set(i);
+            }
+        } else
+            entry->m_liveVariables.clearAll();
+#else
+        UNUSED_PARAM(basicBlock);
+#endif
     }
 
 private:
@@ -353,7 +392,6 @@ private:
     
     // JIT code map for OSR entrypoints.
     Label m_startOfCode;
-    CompactJITCodeMap::Encoder m_jitCodeMapEncoder;
 
     struct PropertyAccessRecord {
         PropertyAccessRecord(Call functionCall, int16_t deltaCheckImmToCall, int16_t deltaCallToStructCheck, int16_t deltaCallToLoadOrStore, int16_t deltaCallToSlowCase, int16_t deltaCallToDone, int8_t baseGPR, int8_t valueGPR, int8_t scratchGPR)
index 4289cbc9e7f1bd6f97a9d59d93a43f7927043560..de9b3a48d296631c34a5a1505daa3be8a75099c1 100644 (file)
@@ -58,7 +58,8 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
     ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);
     ASSERT(codeBlock->alternative());
     ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);
-    ASSERT(codeBlock->jitCodeMap());
+    ASSERT(!codeBlock->jitCodeMap());
+    ASSERT(codeBlock->numberOfDFGOSREntries());
 
 #if ENABLE(JIT_VERBOSE_OSR)
     printf("OSR in %p(%p) from bc#%u\n", codeBlock, codeBlock->alternative(), bytecodeIndex);
@@ -66,6 +67,9 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
     
     JSGlobalData* globalData = &exec->globalData();
     CodeBlock* baselineCodeBlock = codeBlock->alternative();
+    OSREntryData* entry = codeBlock->dfgOSREntryDataForBytecodeIndex(bytecodeIndex);
+    
+    ASSERT(entry->m_bytecodeIndex == bytecodeIndex);
     
     // The code below checks if it is safe to perform OSR entry. It may find
     // that it is unsafe to do so, for any number of reasons, which are documented
@@ -97,7 +101,8 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
         return 0;
     
     for (unsigned i = 1; i < predictions->numberOfArguments(); ++i) {
-        if (!predictionIsValid(globalData, exec->argument(i - 1), predictions->getArgumentPrediction(i))) {
+        if (i < entry->m_liveArguments.size() && entry->m_liveArguments.get(i)
+            && !predictionIsValid(globalData, exec->argument(i - 1), predictions->getArgumentPrediction(i))) {
 #if ENABLE(JIT_VERBOSE_OSR)
             printf("    OSR failed because argument %u is %s, expected %s.\n", i, exec->argument(i - 1).description(), predictionToString(predictions->getArgumentPrediction(i)));
 #endif
@@ -105,11 +110,9 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
         }
     }
     
-    // FIXME: we need to know if at an OSR entry, a variable is live. If it isn't
-    // then we shouldn't try to verify its prediction.
-    
     for (unsigned i = 0; i < predictions->numberOfVariables(); ++i) {
-        if (!predictionIsValid(globalData, exec->registers()[i].jsValue(), predictions->getPrediction(i))) {
+        if (i < entry->m_liveVariables.size() && entry->m_liveVariables.get(i)
+            && !predictionIsValid(globalData, exec->registers()[i].jsValue(), predictions->getPrediction(i))) {
 #if ENABLE(JIT_VERBOSE_OSR)
             printf("    OSR failed because variable %u is %s, expected %s.\n", i, exec->registers()[i].jsValue().description(), predictionToString(predictions->getPrediction(i)));
 #endif
@@ -139,30 +142,9 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
     
     exec->setCodeBlock(codeBlock);
     
-    // 4) Find and return the destination machine code address. The DFG stores
-    //    the machine code offsets of OSR targets in a CompactJITCodeMap.
-    //    Decoding it is not super efficient, but we expect that OSR entry
-    //    happens sufficiently rarely, and that OSR entrypoints are sufficiently
-    //    few, that this won't hurt throughput. Note that the only real
-    //    reason why we use a CompactJITCodeMap is to avoid having to introduce
-    //    yet another data structure for mapping between bytecode indices and
-    //    machine code offsets.
-    
-    CompactJITCodeMap::Decoder decoder(codeBlock->jitCodeMap());
-    unsigned machineCodeOffset = std::numeric_limits<unsigned>::max();
-    while (decoder.numberOfEntriesRemaining()) {
-        unsigned currentBytecodeIndex;
-        unsigned currentMachineCodeOffset;
-        decoder.read(currentBytecodeIndex, currentMachineCodeOffset);
-        if (currentBytecodeIndex == bytecodeIndex) {
-            machineCodeOffset = currentMachineCodeOffset;
-            break;
-        }
-    }
-    
-    ASSERT(machineCodeOffset != std::numeric_limits<unsigned>::max());
+    // 4) Find and return the destination machine code address.
     
-    void* result = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(codeBlock->getJITCode().start()) + machineCodeOffset);
+    void* result = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(codeBlock->getJITCode().start()) + entry->m_machineCodeOffset);
     
 #if ENABLE(JIT_VERBOSE_OSR)
     printf("    OSR returning machine code address %p.\n", result);
index fb4e298ab9a068fd29b3f69ea4761cba92c6c6f6..193e083ce29486eda64604cba12974f3f61eaef3 100644 (file)
@@ -26,6 +26,8 @@
 #ifndef DFGOSREntry_h
 #define DFGOSREntry_h
 
+#include <wtf/BitVector.h>
+
 namespace JSC {
 
 class ExecState;
@@ -34,6 +36,18 @@ class CodeBlock;
 namespace DFG {
 
 #if ENABLE(DFG_JIT)
+struct OSREntryData {
+    unsigned m_bytecodeIndex;
+    unsigned m_machineCodeOffset;
+    BitVector m_liveArguments;
+    BitVector m_liveVariables;
+};
+
+inline unsigned getOSREntryDataBytecodeIndex(OSREntryData* osrEntryData)
+{
+    return osrEntryData->m_bytecodeIndex;
+}
+
 void* prepareOSREntry(ExecState*, CodeBlock*, unsigned bytecodeIndex);
 #else
 inline void* prepareOSREntry(ExecState*, CodeBlock*, unsigned) { return 0; }
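
getOSREntryDataBytecodeIndex() exists purely as a key extractor: entry points
are appended in bytecode order, so CodeBlock::dfgOSREntryDataForBytecodeIndex()
can locate one with WTF's binarySearch. A sketch of that lookup pattern (the
wrapper name is illustrative; the call mirrors the one added in CodeBlock.h):

    // Keyed binary search over OSR entry points sorted by m_bytecodeIndex.
    OSREntryData* findEntry(Vector<OSREntryData>& entries, unsigned bytecodeIndex)
    {
        return binarySearch<OSREntryData, unsigned, getOSREntryDataBytecodeIndex>(
            entries.begin(), entries.size(), bytecodeIndex);
    }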
index 33cc268995d322f51f8a4b4aee90c1a925350ccc..3d30032bc5af7546f3d35c831f7172becbd174bf 100644 (file)
@@ -877,6 +877,12 @@ void SpeculativeJIT::compile(Node& node)
             break;
         }
         
+        if (shouldNotSpeculateInteger(node.child1())) {
+            // Do it the safe way.
+            nonSpeculativeValueToInt32(node);
+            break;
+        }
+        
         SpeculateIntegerOperand op1(this, node.child1());
         GPRTemporary result(this, op1);
         m_jit.move(op1.gpr(), result.gpr());
index 33c695d30dbd7aa674fc847c1400558ce110e579..e33dd6b849d50c4852dd2f41050afc7d0b69fded 100644 (file)
@@ -26,6 +26,8 @@
 #include "config.h"
 #include "ConservativeRoots.h"
 
+#include "JettisonedCodeBlocks.h"
+
 namespace JSC {
 
 inline bool isPointerAligned(void* p)
@@ -33,6 +35,20 @@ inline bool isPointerAligned(void* p)
     return !((intptr_t)(p) & (sizeof(char*) - 1));
 }
 
+ConservativeRoots::ConservativeRoots(const MarkedBlockSet* blocks)
+    : m_roots(m_inlineRoots)
+    , m_size(0)
+    , m_capacity(inlineCapacity)
+    , m_blocks(blocks)
+{
+}
+
+ConservativeRoots::~ConservativeRoots()
+{
+    if (m_roots != m_inlineRoots)
+        OSAllocator::decommitAndRelease(m_roots, m_capacity * sizeof(JSCell*));
+}
+
 void ConservativeRoots::grow()
 {
     size_t newCapacity = m_capacity == inlineCapacity ? nonInlineCapacity : m_capacity * 2;
@@ -44,8 +60,16 @@ void ConservativeRoots::grow()
     m_roots = newRoots;
 }
 
-inline void ConservativeRoots::add(void* p, TinyBloomFilter filter)
+class DummyMarkHook {
+public:
+    void mark(void*) { }
+};
+
+template<typename MarkHook>
+inline void ConservativeRoots::genericAddPointer(void* p, TinyBloomFilter filter, MarkHook& markHook)
 {
+    markHook.mark(p);
+    
     MarkedBlock* candidate = MarkedBlock::blockFor(p);
     if (filter.ruleOut(reinterpret_cast<Bits>(candidate))) {
         ASSERT(!candidate || !m_blocks->set().contains(candidate));
@@ -70,7 +94,8 @@ inline void ConservativeRoots::add(void* p, TinyBloomFilter filter)
     m_roots[m_size++] = static_cast<JSCell*>(p);
 }
 
-void ConservativeRoots::add(void* begin, void* end)
+template<typename MarkHook>
+void ConservativeRoots::genericAddSpan(void* begin, void* end, MarkHook& markHook)
 {
     ASSERT(begin <= end);
     ASSERT((static_cast<char*>(end) - static_cast<char*>(begin)) < 0x1000000);
@@ -79,7 +104,18 @@ void ConservativeRoots::add(void* begin, void* end)
 
     TinyBloomFilter filter = m_blocks->filter(); // Make a local copy of filter to show the compiler it won't alias, and can be register-allocated.
     for (char** it = static_cast<char**>(begin); it != static_cast<char**>(end); ++it)
-        add(*it, filter);
+        genericAddPointer(*it, filter, markHook);
+}
+
+void ConservativeRoots::add(void* begin, void* end)
+{
+    DummyMarkHook dummyMarkHook;
+    genericAddSpan(begin, end, dummyMarkHook);
+}
+
+void ConservativeRoots::add(void* begin, void* end, JettisonedCodeBlocks& jettisonedCodeBlocks)
+{
+    genericAddSpan(begin, end, jettisonedCodeBlocks);
 }
 
 } // namespace JSC
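
The mark hook is a template parameter rather than a virtual interface, so the
hot pointer-scanning loop pays nothing for it: the plain add() passes
DummyMarkHook, whose empty mark() inlines away, while the register-file scan
passes JettisonedCodeBlocks so every scanned word is also tested against the
jettisoned set. The pattern, condensed (function name illustrative):

    // Zero-overhead hook: the hook type is known at compile time, so an
    // empty mark() disappears entirely after inlining.
    template<typename MarkHook>
    void scanSpan(void** begin, void** end, MarkHook& hook)
    {
        for (void** it = begin; it != end; ++it)
            hook.mark(*it); // no-op for DummyMarkHook
    }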
index e10050d0a41d14a8c10f030c725f7316d4322671..035db8f05f8f702a85c1a8fbc24f0509465dfe76 100644 (file)
@@ -33,6 +33,7 @@
 namespace JSC {
 
 class JSCell;
+class JettisonedCodeBlocks;
 class Heap;
 
 class ConservativeRoots {
@@ -41,6 +42,7 @@ public:
     ~ConservativeRoots();
 
     void add(void* begin, void* end);
+    void add(void* begin, void* end, JettisonedCodeBlocks&);
     
     size_t size();
     JSCell** roots();
@@ -49,7 +51,12 @@ private:
     static const size_t inlineCapacity = 128;
     static const size_t nonInlineCapacity = 8192 / sizeof(JSCell*);
     
-    void add(void*, TinyBloomFilter);
+    template<typename MarkHook>
+    void genericAddPointer(void*, TinyBloomFilter, MarkHook&);
+
+    template<typename MarkHook>
+    void genericAddSpan(void*, void* end, MarkHook&);
+    
     void grow();
 
     JSCell** m_roots;
@@ -59,20 +66,6 @@ private:
     JSCell* m_inlineRoots[inlineCapacity];
 };
 
-inline ConservativeRoots::ConservativeRoots(const MarkedBlockSet* blocks)
-    : m_roots(m_inlineRoots)
-    , m_size(0)
-    , m_capacity(inlineCapacity)
-    , m_blocks(blocks)
-{
-}
-
-inline ConservativeRoots::~ConservativeRoots()
-{
-    if (m_roots != m_inlineRoots)
-        OSAllocator::decommitAndRelease(m_roots, m_capacity * sizeof(JSCell*));
-}
-
 inline size_t ConservativeRoots::size()
 {
     return m_size;
index 9b4ed6ae025e42170a0b951b9f1a790b6259db47..f6e1d5e5382e034322f831e58ed105307db151bd 100644 (file)
@@ -394,6 +394,11 @@ void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
         heapRootVisitor.visit(&it->first);
 }
 
+void Heap::addJettisonedCodeBlock(PassOwnPtr<CodeBlock> codeBlock)
+{
+    m_jettisonedCodeBlocks.addCodeBlock(codeBlock);
+}
+
 void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
 {
     m_tempSortingVectors.append(tempVector);
@@ -456,20 +461,22 @@ void Heap::markRoots()
     m_operationInProgress = Collection;
 
     void* dummy;
-
+    
     // We gather conservative roots before clearing mark bits because conservative
     // gathering uses the mark bits to determine whether a reference is valid.
     ConservativeRoots machineThreadRoots(&m_objectSpace.blocks());
     m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
 
     ConservativeRoots registerFileRoots(&m_objectSpace.blocks());
-    registerFile().gatherConservativeRoots(registerFileRoots);
+    m_jettisonedCodeBlocks.clearMarks();
+    registerFile().gatherConservativeRoots(registerFileRoots, m_jettisonedCodeBlocks);
+    m_jettisonedCodeBlocks.deleteUnmarkedCodeBlocks();
 
     clearMarks();
 
     SlotVisitor& visitor = m_slotVisitor;
     HeapRootVisitor heapRootVisitor(visitor);
-
+    
     visitor.append(machineThreadRoots);
     visitor.drain();
 
@@ -493,6 +500,9 @@ void Heap::markRoots()
 
     m_handleStack.visit(heapRootVisitor);
     visitor.drain();
+    
+    m_jettisonedCodeBlocks.traceCodeBlocks(visitor);
+    visitor.drain();
 
     // Weak handles must be marked last, because their owners use the set of
     // opaque roots to determine reachability.
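
The ordering in markRoots() is what keeps a jettisoned block alive exactly as
long as some stack frame is still running it: marks are cleared, the
register-file scan re-marks any block whose address is found on the stack,
unmarked blocks are destroyed, and the survivors then trace their own
references so nothing they depend on is collected. In outline (variable names
illustrative):

    // Per-collection lifecycle of jettisoned code blocks, in the order
    // Heap::markRoots() performs it.
    blocks.clearMarks();                                  // presume all dead
    registerFile.gatherConservativeRoots(roots, blocks);  // re-mark live ones
    blocks.deleteUnmarkedCodeBlocks();                    // free the rest
    // ... other roots are visited and drained ...
    blocks.traceCodeBlocks(visitor);                      // keep survivors' referents alive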
index 4ff2603bd0d52e6af2654c3000f9e81c6552f10e..efe5f469c0647e2525a41407b664f81d975cc1cf 100644 (file)
@@ -25,6 +25,7 @@
 #include "AllocationSpace.h"
 #include "HandleHeap.h"
 #include "HandleStack.h"
+#include "JettisonedCodeBlocks.h"
 #include "MarkedBlock.h"
 #include "MarkedBlockSet.h"
 #include "MarkedSpace.h"
@@ -98,6 +99,8 @@ namespace JSC {
 
         void protect(JSValue);
         bool unprotect(JSValue); // True when the protect count drops to 0.
+        
+        void addJettisonedCodeBlock(PassOwnPtr<CodeBlock>);
 
         size_t size();
         size_t capacity();
@@ -191,6 +194,7 @@ namespace JSC {
         SlotVisitor m_slotVisitor;
         HandleHeap m_handleHeap;
         HandleStack m_handleStack;
+        JettisonedCodeBlocks m_jettisonedCodeBlocks;
         
         bool m_isSafeToCollect;
 
diff --git a/Source/JavaScriptCore/heap/JettisonedCodeBlocks.cpp b/Source/JavaScriptCore/heap/JettisonedCodeBlocks.cpp
new file mode 100644 (file)
index 0000000..4f22740
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "config.h"
+#include "JettisonedCodeBlocks.h"
+
+#include "CodeBlock.h"
+#include "SlotVisitor.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+JettisonedCodeBlocks::JettisonedCodeBlocks() { }
+
+JettisonedCodeBlocks::~JettisonedCodeBlocks()
+{
+    WTF::deleteAllKeys(m_map);
+}
+
+void JettisonedCodeBlocks::addCodeBlock(PassOwnPtr<CodeBlock> codeBlock)
+{
+    ASSERT(m_map.find(codeBlock.get()) == m_map.end());
+    
+    m_map.add(codeBlock.leakPtr(), false);
+}
+
+void JettisonedCodeBlocks::clearMarks()
+{
+    HashMap<CodeBlock*, bool>::iterator begin = m_map.begin();
+    HashMap<CodeBlock*, bool>::iterator end = m_map.end();
+    for (HashMap<CodeBlock*, bool>::iterator iter = begin; iter != end; ++iter)
+        iter->second = false;
+}
+
+void JettisonedCodeBlocks::deleteUnmarkedCodeBlocks()
+{
+    Vector<CodeBlock*> toRemove;
+    
+    HashMap<CodeBlock*, bool>::iterator begin = m_map.begin();
+    HashMap<CodeBlock*, bool>::iterator end = m_map.end();
+    for (HashMap<CodeBlock*, bool>::iterator iter = begin; iter != end; ++iter) {
+        if (!iter->second)
+            toRemove.append(iter->first);
+    }
+    
+    for (unsigned i = 0; i < toRemove.size(); ++i) {
+        m_map.remove(toRemove[i]);
+        delete toRemove[i];
+    }
+}
+
+void JettisonedCodeBlocks::traceCodeBlocks(SlotVisitor& slotVisitor)
+{
+    HashMap<CodeBlock*, bool>::iterator begin = m_map.begin();
+    HashMap<CodeBlock*, bool>::iterator end = m_map.end();
+    for (HashMap<CodeBlock*, bool>::iterator iter = begin; iter != end; ++iter)
+        iter->first->visitAggregate(slotVisitor);
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/heap/JettisonedCodeBlocks.h b/Source/JavaScriptCore/heap/JettisonedCodeBlocks.h
new file mode 100644 (file)
index 0000000..38dc99f
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef JettisonedCodeBlocks_h
+#define JettisonedCodeBlocks_h
+
+#include <wtf/FastAllocBase.h>
+#include <wtf/HashMap.h>
+#include <wtf/PassOwnPtr.h>
+
+namespace JSC {
+
+class CodeBlock;
+class SlotVisitor;
+
+class JettisonedCodeBlocks {
+    WTF_MAKE_FAST_ALLOCATED; // Only malloc'd in ConservativeRoots
+public:
+    JettisonedCodeBlocks();
+    
+    ~JettisonedCodeBlocks();
+    
+    void addCodeBlock(PassOwnPtr<CodeBlock>);
+    
+    void clearMarks();
+    
+    void mark(void* candidateCodeBlock)
+    {
+        // We have to check for 0 and -1 because those are used by the HashMap as markers.
+        uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
+        
+        // This checks for both of those nasty cases in one go.
+        // 0 + 1 = 1
+        // -1 + 1 = 0
+        if (value + 1 <= 1)
+            return;
+        
+        HashMap<CodeBlock*, bool>::iterator iter = m_map.find(static_cast<CodeBlock*>(candidateCodeBlock));
+        if (iter == m_map.end())
+            return;
+        iter->second = true;
+    }
+    
+    void deleteUnmarkedCodeBlocks();
+    
+    void traceCodeBlocks(SlotVisitor&);
+
+private:
+    // It would be great to use an OwnPtr<CodeBlock> here but that would
+    // almost certainly not work.
+    HashMap<CodeBlock*, bool> m_map;
+};
+
+} // namespace JSC
+
+#endif // JettisonedCodeBlocks_h
+
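
The "value + 1 <= 1" filter in mark() works because unsigned arithmetic
wraps: 0 becomes 1 and uintptr_t(-1) becomes 0, and those are the only inputs
that land at or below 1; any other value maps to at least 2. A compile-time
check of the trick (C++11 static_assert, illustration only):

    #include <cstdint>
    // Only the two HashMap sentinel values pass the single comparison.
    static_assert(uintptr_t(0) + 1 <= 1, "0 is filtered out");
    static_assert(uintptr_t(-1) + 1 <= 1, "-1 wraps to 0 and is filtered out");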
index aa9b24971730345ec352af2343a9e6bc8172cb2e..79d62393fc104c6ddf37efb43d0bf55a258c802d 100644 (file)
@@ -55,6 +55,11 @@ void RegisterFile::gatherConservativeRoots(ConservativeRoots& conservativeRoots)
     conservativeRoots.add(begin(), end());
 }
 
+void RegisterFile::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JettisonedCodeBlocks& jettisonedCodeBlocks)
+{
+    conservativeRoots.add(begin(), end(), jettisonedCodeBlocks);
+}
+
 void RegisterFile::releaseExcessCapacity()
 {
     ptrdiff_t delta = reinterpret_cast<uintptr_t>(m_commitEnd) - reinterpret_cast<uintptr_t>(m_reservation.base());
index 91b80ade4ddf1d8a8f5316ff99862b78d362c628..2515da3b5965564ba9cdfba783594d03f9f819aa 100644 (file)
@@ -38,6 +38,7 @@
 namespace JSC {
 
     class ConservativeRoots;
+    class JettisonedCodeBlocks;
 
     class RegisterFile {
         WTF_MAKE_NONCOPYABLE(RegisterFile);
@@ -64,6 +65,7 @@ namespace JSC {
         ~RegisterFile();
         
         void gatherConservativeRoots(ConservativeRoots&);
+        void gatherConservativeRoots(ConservativeRoots&, JettisonedCodeBlocks&);
 
         Register* begin() const { return static_cast<Register*>(m_reservation.base()); }
         Register* end() const { return m_end; }
index d0bf7586d01bbe438843e18fac190fe32347ee38..da012c3e527a3a8c4e6b7fd305234b40d668244d 100644 (file)
@@ -1914,7 +1914,22 @@ DEFINE_STUB_FUNCTION(void, optimize_from_loop)
     CodeBlock* codeBlock = callFrame->codeBlock();
     unsigned bytecodeIndex = stackFrame.args[0].int32();
 
-    if (!codeBlock->hasOptimizedReplacement()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+    printf("Entered optimize_from_loop with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u\n", codeBlock->executeCounter(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
+#endif
+
+    if (codeBlock->hasOptimizedReplacement()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+        printf("Considering loop OSR into %p(%p) with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
+#endif
+        if (codeBlock->replacement()->shouldReoptimizeFromLoopNow()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+            printf("Triggering reoptimization of %p(%p) (in loop).\n", codeBlock, codeBlock->replacement());
+#endif
+            codeBlock->reoptimize(callFrame->globalData());
+            return;
+        }
+    } else {
         if (!codeBlock->shouldOptimizeNow()) {
 #if ENABLE(JIT_VERBOSE_OSR)
             printf("Delaying optimization for %p (in loop) because of insufficient profiling.\n", codeBlock);
@@ -1952,6 +1967,7 @@ DEFINE_STUB_FUNCTION(void, optimize_from_loop)
 #endif
 
         codeBlock->optimizeSoon();
+        optimizedCodeBlock->countSpeculationSuccess();
         STUB_SET_RETURN_ADDRESS(address);
         return;
     }
@@ -1960,6 +1976,30 @@ DEFINE_STUB_FUNCTION(void, optimize_from_loop)
     printf("Optimizing %p from loop succeeded, OSR failed.\n", codeBlock);
 #endif
 
+    // Count the OSR failure as a speculation failure. If this happens a lot, then
+    // reoptimize.
+    optimizedCodeBlock->countSpeculationFailure();
+    
+#if ENABLE(JIT_VERBOSE_OSR)
+    printf("Encountered loop OSR failure into %p(%p) with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
+#endif
+
+    // We are a lot more conservative about triggering reoptimization after OSR failure than
+    // before it. If we enter the optimize_from_loop trigger with a bucket full of fail
+    // already, then we really would like to reoptimize immediately. But this case covers
+    // something else: there weren't many (or any) speculation failures before, but we just
+    // failed to enter the speculative code because some variable had the wrong value or
+    // because the OSR code decided for some spurious reason that it did not want to OSR
+    // right now. So, we trigger reoptimization only upon the more conservative (non-loop)
+    // reoptimization trigger.
+    if (optimizedCodeBlock->shouldReoptimizeNow()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+        printf("Triggering reoptimization of %p(%p) (in loop after OSR fail).\n", codeBlock, codeBlock->replacement());
+#endif
+        codeBlock->reoptimize(callFrame->globalData());
+        return;
+    }
+
     // OSR failed this time, but it might succeed next time! Let the code run a bit
     // longer and then try again.
     codeBlock->optimizeAfterWarmUp();
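The shouldReoptimizeNow() and shouldReoptimizeFromLoopNow() predicates consulted above live in CodeBlock and are not shown in this hunk; conceptually they weigh the speculativeSuccessCounter/speculativeFailCounter pair, with the reoptimization retry counter raising the bar each time. A purely illustrative sketch of that shape (the names and thresholds here are made up, not the shipped heuristic):

    static bool shouldReoptimizeNowSketch(unsigned successCount, unsigned failCount, unsigned retryCount)
    {
        // Demand both an absolute volume of failures and a high failure rate,
        // and demand exponentially more on each retry, so a block that keeps
        // invalidating its optimized code is recompiled less and less eagerly.
        unsigned requiredFailures = 100u << retryCount;
        return failCount >= requiredFailures && failCount * 2 >= successCount;
    }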
@@ -1972,8 +2012,23 @@ DEFINE_STUB_FUNCTION(void, optimize_from_ret)
     CallFrame* callFrame = stackFrame.callFrame;
     CodeBlock* codeBlock = callFrame->codeBlock();
     
-    if (codeBlock->hasOptimizedReplacement())
+#if ENABLE(JIT_VERBOSE_OSR)
+    printf("Entered optimize_from_ret with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u\n", codeBlock->executeCounter(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
+#endif
+
+    if (codeBlock->hasOptimizedReplacement()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+        printf("Returning from old JIT call frame with optimized replacement %p(%p), with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
+#endif
+        if (codeBlock->replacement()->shouldReoptimizeNow()) {
+#if ENABLE(JIT_VERBOSE_OSR)
+            printf("Triggering reoptimization of %p(%p) (in return).\n", codeBlock, codeBlock->replacement());
+#endif
+            codeBlock->reoptimize(callFrame->globalData());
+        }
+
         return;
+    }
     
     if (!codeBlock->shouldOptimizeNow()) {
 #if ENABLE(JIT_VERBOSE_OSR)
index 292056795acd1aa6c0ec696dc7653898b0a8ee51..0c59319765a1ecd15674fdef06b71e4ddd6a5f1e 100644 (file)
@@ -84,6 +84,18 @@ DFG::Intrinsic NativeExecutable::intrinsic() const
 }
 #endif
 
+// Utility function for jettisoning a code block: the optimized code block is
+// detached from its executable (its baseline alternative takes over), its
+// incoming calls are unlinked, and ownership passes to the heap, which keeps
+// the block alive until no conservatively-scanned frame still references it.
+template<typename T>
+static void jettisonCodeBlock(JSGlobalData& globalData, OwnPtr<T>& codeBlock)
+{
+    ASSERT(codeBlock->getJITType() != JITCode::BaselineJIT);
+    ASSERT(codeBlock->alternative());
+    OwnPtr<T> codeBlockToJettison = codeBlock.release();
+    codeBlock = static_pointer_cast<T>(codeBlockToJettison->releaseAlternative());
+    codeBlockToJettison->unlinkIncomingCalls();
+    globalData.heap.addJettisonedCodeBlock(static_pointer_cast<CodeBlock>(codeBlockToJettison.release()));
+}
+
 const ClassInfo ScriptExecutable::s_info = { "ScriptExecutable", &ExecutableBase::s_info, 0, 0 };
 
 const ClassInfo EvalExecutable::s_info = { "EvalExecutable", &ScriptExecutable::s_info, 0, 0 };
@@ -211,6 +223,13 @@ JSObject* EvalExecutable::compileInternal(ExecState* exec, ScopeChainNode* scope
     return 0;
 }
 
+void EvalExecutable::jettisonOptimizedCode(JSGlobalData& globalData)
+{
+    jettisonCodeBlock(globalData, m_evalCodeBlock);
+    m_jitCodeForCall = m_evalCodeBlock->getJITCode();
+    ASSERT(!m_jitCodeForCallWithArityCheck);
+}
+
 void EvalExecutable::visitChildren(SlotVisitor& visitor)
 {
     ASSERT_GC_OBJECT_INHERITS(this, &s_info);
@@ -330,6 +349,13 @@ JSObject* ProgramExecutable::compileInternal(ExecState* exec, ScopeChainNode* sc
     return 0;
 }
 
+void ProgramExecutable::jettisonOptimizedCode(JSGlobalData& globalData)
+{
+    jettisonCodeBlock(globalData, m_programCodeBlock);
+    m_jitCodeForCall = m_programCodeBlock->getJITCode();
+    ASSERT(!m_jitCodeForCallWithArityCheck);
+}
+
 void ProgramExecutable::unlinkCalls()
 {
 #if ENABLE(JIT)
@@ -534,6 +560,20 @@ JSObject* FunctionExecutable::compileForConstructInternal(ExecState* exec, Scope
     return 0;
 }
 
+void FunctionExecutable::jettisonOptimizedCodeForCall(JSGlobalData& globalData)
+{
+    jettisonCodeBlock(globalData, m_codeBlockForCall);
+    m_jitCodeForCall = m_codeBlockForCall->getJITCode();
+    m_jitCodeForCallWithArityCheck = m_codeBlockForCall->getJITCodeWithArityCheck();
+}
+
+void FunctionExecutable::jettisonOptimizedCodeForConstruct(JSGlobalData& globalData)
+{
+    jettisonCodeBlock(globalData, m_codeBlockForConstruct);
+    m_jitCodeForConstruct = m_codeBlockForConstruct->getJITCode();
+    m_jitCodeForConstructWithArityCheck = m_codeBlockForConstruct->getJITCodeWithArityCheck();
+}
+
 void FunctionExecutable::visitChildren(SlotVisitor& visitor)
 {
     ASSERT_GC_OBJECT_INHERITS(this, &s_info);
index bd8a97e7120af7a8513d8820bfb86a825845497c..3bbf00aa31606ee8ccafee4b643ec1a9ff5bdbde 100644 (file)
@@ -327,6 +327,8 @@ namespace JSC {
         }
         
         JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+        
+        void jettisonOptimizedCode(JSGlobalData&);
 
         EvalCodeBlock& generatedBytecode()
         {
@@ -392,6 +394,8 @@ namespace JSC {
         }
 
         JSObject* compileOptimized(ExecState*, ScopeChainNode*);
+        
+        void jettisonOptimizedCode(JSGlobalData&);
 
         ProgramCodeBlock& generatedBytecode()
         {
@@ -475,6 +479,8 @@ namespace JSC {
         }
 
         JSObject* compileOptimizedForCall(ExecState*, ScopeChainNode*, ExecState* calleeArgsExec = 0);
+        
+        void jettisonOptimizedCodeForCall(JSGlobalData&);
 
         bool isGeneratedForCall() const
         {
@@ -498,6 +504,8 @@ namespace JSC {
         }
 
         JSObject* compileOptimizedForConstruct(ExecState*, ScopeChainNode*, ExecState* calleeArgsExec = 0);
+        
+        void jettisonOptimizedCodeForConstruct(JSGlobalData&);
 
         bool isGeneratedForConstruct() const
         {
@@ -538,6 +546,16 @@ namespace JSC {
             return compileOptimizedForConstruct(exec, scopeChainNode, exec);
         }
         
+        void jettisonOptimizedCodeFor(JSGlobalData& globalData, CodeSpecializationKind kind)
+        {
+            if (kind == CodeForCall) 
+                jettisonOptimizedCodeForCall(globalData);
+            else {
+                ASSERT(kind == CodeForConstruct);
+                jettisonOptimizedCodeForConstruct(globalData);
+            }
+        }
+        
         bool isGeneratedFor(CodeSpecializationKind kind)
         {
             if (kind == CodeForCall)
diff --git a/Source/JavaScriptCore/wtf/BitVector.h b/Source/JavaScriptCore/wtf/BitVector.h
new file mode 100644 (file)
index 0000000..b5df415
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef BitVector_h
+#define BitVector_h
+
+#include <algorithm>
+#include <string.h>
+#include <wtf/Assertions.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/StdLibExtras.h>
+
+namespace WTF {
+
+// This is a space-efficient, resizeable bitvector class. In the common case it
+// occupies one word, but if necessary, it will inflate this one word to point
+// to a single chunk of out-of-line allocated storage to store an arbitrary number
+// of bits.
+//
+// - The bitvector needs to be resized manually (just call ensureSize()).
+//
+// - The bitvector remembers the bound of how many bits can be stored, but this
+//   may be slightly greater (by as much as some platform-specific constant)
+//   than the last argument passed to ensureSize().
+//
+// - Accesses ASSERT that you are within bounds.
+//
+// - Bits are not automatically initialized to zero.
+//
+// On the other hand, this BitVector class may not be the fastest around, since
+// it does conditionals on every get/set/clear. But it is great if you need to
+// juggle a lot of variable-length BitVectors and you're worried about wasting
+// space.
+
+class BitVector {
+public: 
+    BitVector()
+        : m_bitsOrPointer(makeInlineBits(0))
+    {
+    }
+    
+    BitVector(const BitVector& other)
+        : m_bitsOrPointer(makeInlineBits(0))
+    {
+        (*this) = other;
+    }
+    
+    ~BitVector()
+    {
+        if (isInline())
+            return;
+        OutOfLineBits::destroy(outOfLineBits());
+    }
+    
+    BitVector& operator=(const BitVector& other)
+    {
+        uintptr_t newBitsOrPointer;
+        if (other.isInline())
+            newBitsOrPointer = other.m_bitsOrPointer;
+        else {
+            OutOfLineBits* newOutOfLineBits = OutOfLineBits::create(other.size());
+            memcpy(newOutOfLineBits->bits(), other.bits(), byteCount(other.size()));
+            newBitsOrPointer = reinterpret_cast<uintptr_t>(newOutOfLineBits);
+        }
+        if (!isInline())
+            OutOfLineBits::destroy(outOfLineBits());
+        m_bitsOrPointer = newBitsOrPointer;
+        return *this;
+    }
+
+    size_t size() const
+    {
+        if (isInline())
+            return maxInlineBits();
+        return outOfLineBits()->numBits();
+    }
+
+    void ensureSize(size_t numBits)
+    {
+        if (numBits <= size())
+            return;
+        resizeOutOfLine(numBits);
+    }
+    
+    // Like ensureSize(), but supports reducing the size of the bitvector.
+    void resize(size_t numBits)
+    {
+        if (isInline())
+            return;
+        
+        if (numBits <= maxInlineBits()) {
+            OutOfLineBits* myOutOfLineBits = outOfLineBits();
+            // Mask off the unspecified tag-position bit before re-tagging, so
+            // stale out-of-line data cannot trip makeInlineBits()'s ASSERT.
+            m_bitsOrPointer = makeInlineBits(*myOutOfLineBits->bits() & ~(static_cast<uintptr_t>(1) << maxInlineBits()));
+            OutOfLineBits::destroy(myOutOfLineBits);
+            return;
+        }
+        
+        resizeOutOfLine(numBits);
+    }
+    
+    void clearAll()
+    {
+        if (isInline())
+            m_bitsOrPointer = makeInlineBits(0);
+        else
+            memset(outOfLineBits()->bits(), 0, byteCount(size()));
+    }
+
+    bool get(size_t bit) const
+    {
+        ASSERT(bit < size());
+        return !!(bits()[bit / bitsInPointer()] & (static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1))));
+    }
+    
+    void set(size_t bit)
+    {
+        ASSERT(bit < size());
+        bits()[bit / bitsInPointer()] |= (static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1)));
+    }
+    
+    void clear(size_t bit)
+    {
+        ASSERT(bit < size());
+        bits()[bit / bitsInPointer()] &= ~(static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1)));
+    }
+    
+    void set(size_t bit, bool value)
+    {
+        if (value)
+            set(bit);
+        else
+            clear(bit);
+    }
+    
+private:
+    static unsigned bitsInPointer()
+    {
+        return sizeof(void*) << 3;
+    }
+    
+    static unsigned maxInlineBits()
+    {
+        return bitsInPointer() - 1;
+    }
+    
+    // Rounds the bit count up to a whole number of bytes, so any bit count
+    // is safe to pass, including the inline capacity of maxInlineBits().
+    static size_t byteCount(size_t bitCount)
+    {
+        return (bitCount + 7) >> 3;
+    }
+    
+    static uintptr_t makeInlineBits(uintptr_t bits)
+    {
+        ASSERT(!(bits & (static_cast<uintptr_t>(1) << maxInlineBits())));
+        return bits | (static_cast<uintptr_t>(1) << maxInlineBits());
+    }
+    
+    class OutOfLineBits {
+    public:
+        size_t numBits() const { return m_numBits; }
+        size_t numWords() const { return (m_numBits + bitsInPointer() - 1) / bitsInPointer(); }
+        uintptr_t* bits() { return reinterpret_cast<uintptr_t*>(this + 1); }
+        const uintptr_t* bits() const { return reinterpret_cast<const uintptr_t*>(this + 1); }
+        
+        static OutOfLineBits* create(size_t numBits)
+        {
+            // Round the bit count up to a whole number of words so that the
+            // storage can always be addressed one full uintptr_t at a time.
+            numBits = (numBits + bitsInPointer() - 1) & ~(bitsInPointer() - 1);
+            return new (fastMalloc(sizeof(OutOfLineBits) + numBits / 8)) OutOfLineBits(numBits);
+        }
+        
+        static void destroy(OutOfLineBits* outOfLineBits)
+        {
+            fastFree(outOfLineBits);
+        }
+
+    private:
+        OutOfLineBits(size_t numBits)
+            : m_numBits(numBits)
+        {
+        }
+        
+        size_t m_numBits;
+    };
+    
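+    // Tagged encoding: the top bit of m_bitsOrPointer distinguishes inline
+    // storage (bit set, see makeInlineBits()) from a pointer to OutOfLineBits
+    // (bit clear); the out-of-line representation therefore relies on heap
+    // pointers never having their top bit set.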
+    bool isInline() const { return m_bitsOrPointer >> maxInlineBits(); }
+    
+    const OutOfLineBits* outOfLineBits() const { return reinterpret_cast<const OutOfLineBits*>(m_bitsOrPointer); }
+    OutOfLineBits* outOfLineBits() { return reinterpret_cast<OutOfLineBits*>(m_bitsOrPointer); }
+    
+    void resizeOutOfLine(size_t numBits)
+    {
+        ASSERT(numBits > maxInlineBits());
+        OutOfLineBits* newOutOfLineBits = OutOfLineBits::create(numBits);
+        memcpy(newOutOfLineBits->bits(), bits(), byteCount(std::min(size(), numBits)));
+        if (!isInline())
+            OutOfLineBits::destroy(outOfLineBits());
+        m_bitsOrPointer = reinterpret_cast<uintptr_t>(newOutOfLineBits);
+    }
+    
+    uintptr_t* bits()
+    {
+        if (isInline())
+            return &m_bitsOrPointer;
+        return outOfLineBits()->bits();
+    }
+    
+    const uintptr_t* bits() const
+    {
+        if (isInline())
+            return &m_bitsOrPointer;
+        return outOfLineBits()->bits();
+    }
+    
+    uintptr_t m_bitsOrPointer;
+};
+
+} // namespace WTF
+
+using WTF::BitVector;
+
+#endif // BitVector_h
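To make the contract above concrete, a minimal usage sketch (the variable name is illustrative; note that bits must be cleared explicitly, since neither construction nor growth zero-initializes them):

    BitVector liveBits;
    liveBits.ensureSize(128); // may round up, so size() can end up greater than 128
    liveBits.clearAll();      // bits start uninitialized; clear before first use
    liveBits.set(3);
    liveBits.set(64, true);   // the two-argument set(bit, value) overload
    ASSERT(liveBits.get(3));
    liveBits.clear(3);
    liveBits.resize(32);      // shrinks back to the one-word inline representation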