The executable allocator makes it difficult to free individual
chunks of executable memory
https://bugs.webkit.org/show_bug.cgi?id=66363

author     fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
           Sun, 11 Sep 2011 05:49:36 +0000 (05:49 +0000)
committer  fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
           Sun, 11 Sep 2011 05:49:36 +0000 (05:49 +0000)

Reviewed by Oliver Hunt.

Introduced a best-fit, balanced-tree based allocator. The allocator
required a balanced tree that does not allocate memory and that
permits the removal of individual nodes directly (as opposed to by
key); neither AVLTree nor WebCore's PODRedBlackTree supported this.
Changed all references to executable code to use a reference counted
handle.
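
As a rough, standalone illustration of the best-fit lookup and of removing the
chosen node directly rather than by key (in a size-keyed free list many blocks
can share one size, so a key lookup alone would be ambiguous), here is a
simplified sketch. It is not the WTF code: std::multimap stands in for the new
non-allocating WTF::RedBlackTree, and the BestFitFreeList name is purely
illustrative.

    // Hypothetical sketch of best-fit free-space lookup. The real
    // MetaAllocator keys a non-allocating red-black tree on free-block
    // size; this stand-in uses std::multimap, which does allocate.
    #include <cstddef>
    #include <cstdio>
    #include <map>

    struct FreeBlock {
        void* start;
        size_t sizeInBytes;
    };

    class BestFitFreeList {
    public:
        void addFreeSpace(void* start, size_t size)
        {
            // Duplicate sizes are allowed, so removal by key would be
            // ambiguous; we must erase the exact node we picked.
            m_bySize.insert(std::make_pair(size, FreeBlock { start, size }));
        }

        // Best fit: the smallest free block whose size is >= the request.
        bool allocate(size_t request, FreeBlock& out)
        {
            std::multimap<size_t, FreeBlock>::iterator it = m_bySize.lower_bound(request);
            if (it == m_bySize.end())
                return false;
            out = it->second;
            m_bySize.erase(it); // remove this particular node directly
            return true;
        }

    private:
        std::multimap<size_t, FreeBlock> m_bySize;
    };

    int main()
    {
        char pool[4096];
        BestFitFreeList freeList;
        freeList.addFreeSpace(pool, 1024);
        freeList.addFreeSpace(pool + 1024, 256);
        freeList.addFreeSpace(pool + 1280, 256);

        FreeBlock block;
        if (freeList.allocate(200, block)) // picks one of the 256-byte blocks
            std::printf("best fit: %zu bytes at %p\n", block.sizeInBytes, block.start);
        return 0;
    }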

Source/JavaScriptCore:

* GNUmakefile.list.am:
* JavaScriptCore.exp:
* JavaScriptCore.vcproj/WTF/WTF.vcproj:
* JavaScriptCore.xcodeproj/project.pbxproj:
* assembler/AssemblerBuffer.h:
(JSC::AssemblerBuffer::executableCopy):
* assembler/LinkBuffer.h:
(JSC::LinkBuffer::LinkBuffer):
(JSC::LinkBuffer::finalizeCode):
(JSC::LinkBuffer::linkCode):
* assembler/MacroAssemblerCodeRef.h:
(JSC::MacroAssemblerCodeRef::MacroAssemblerCodeRef):
(JSC::MacroAssemblerCodeRef::createSelfManagedCodeRef):
(JSC::MacroAssemblerCodeRef::executableMemory):
(JSC::MacroAssemblerCodeRef::code):
(JSC::MacroAssemblerCodeRef::size):
(JSC::MacroAssemblerCodeRef::operator!):
* assembler/X86Assembler.h:
(JSC::X86Assembler::executableCopy):
(JSC::X86Assembler::X86InstructionFormatter::executableCopy):
* bytecode/CodeBlock.h:
* bytecode/Instruction.h:
* bytecode/StructureStubInfo.h:
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::compile):
(JSC::DFG::JITCompiler::compileFunction):
* dfg/DFGRepatch.cpp:
(JSC::DFG::generateProtoChainAccessStub):
(JSC::DFG::tryCacheGetByID):
(JSC::DFG::tryBuildGetByIDList):
(JSC::DFG::tryBuildGetByIDProtoList):
(JSC::DFG::tryCachePutByID):
* jit/ExecutableAllocator.cpp:
(JSC::ExecutableAllocator::initializeAllocator):
(JSC::ExecutableAllocator::ExecutableAllocator):
(JSC::ExecutableAllocator::allocate):
(JSC::ExecutableAllocator::committedByteCount):
(JSC::ExecutableAllocator::dumpProfile):
* jit/ExecutableAllocator.h:
(JSC::ExecutableAllocator::dumpProfile):
* jit/ExecutableAllocatorFixedVMPool.cpp:
(JSC::ExecutableAllocator::initializeAllocator):
(JSC::ExecutableAllocator::ExecutableAllocator):
(JSC::ExecutableAllocator::isValid):
(JSC::ExecutableAllocator::underMemoryPressure):
(JSC::ExecutableAllocator::allocate):
(JSC::ExecutableAllocator::committedByteCount):
(JSC::ExecutableAllocator::dumpProfile):
* jit/JIT.cpp:
(JSC::JIT::privateCompile):
* jit/JIT.h:
(JSC::JIT::compileCTIMachineTrampolines):
(JSC::JIT::compileCTINativeCall):
* jit/JITCode.h:
(JSC::JITCode::operator !):
(JSC::JITCode::addressForCall):
(JSC::JITCode::offsetOf):
(JSC::JITCode::execute):
(JSC::JITCode::start):
(JSC::JITCode::size):
(JSC::JITCode::getExecutableMemory):
(JSC::JITCode::HostFunction):
(JSC::JITCode::JITCode):
* jit/JITOpcodes.cpp:
(JSC::JIT::privateCompileCTIMachineTrampolines):
(JSC::JIT::privateCompileCTINativeCall):
* jit/JITOpcodes32_64.cpp:
(JSC::JIT::privateCompileCTIMachineTrampolines):
(JSC::JIT::privateCompileCTINativeCall):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::stringGetByValStubGenerator):
(JSC::JIT::emitSlow_op_get_by_val):
(JSC::JIT::privateCompilePutByIdTransition):
(JSC::JIT::privateCompilePatchGetArrayLength):
(JSC::JIT::privateCompileGetByIdProto):
(JSC::JIT::privateCompileGetByIdSelfList):
(JSC::JIT::privateCompileGetByIdProtoList):
(JSC::JIT::privateCompileGetByIdChainList):
(JSC::JIT::privateCompileGetByIdChain):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::stringGetByValStubGenerator):
(JSC::JIT::emitSlow_op_get_by_val):
(JSC::JIT::privateCompilePutByIdTransition):
(JSC::JIT::privateCompilePatchGetArrayLength):
(JSC::JIT::privateCompileGetByIdProto):
(JSC::JIT::privateCompileGetByIdSelfList):
(JSC::JIT::privateCompileGetByIdProtoList):
(JSC::JIT::privateCompileGetByIdChainList):
(JSC::JIT::privateCompileGetByIdChain):
* jit/JITStubs.cpp:
(JSC::JITThunks::JITThunks):
(JSC::DEFINE_STUB_FUNCTION):
(JSC::getPolymorphicAccessStructureListSlot):
(JSC::JITThunks::ctiStub):
(JSC::JITThunks::hostFunctionStub):
* jit/JITStubs.h:
* jit/SpecializedThunkJIT.h:
(JSC::SpecializedThunkJIT::SpecializedThunkJIT):
(JSC::SpecializedThunkJIT::finalize):
* jit/ThunkGenerators.cpp:
(JSC::charCodeAtThunkGenerator):
(JSC::charAtThunkGenerator):
(JSC::fromCharCodeThunkGenerator):
(JSC::sqrtThunkGenerator):
(JSC::floorThunkGenerator):
(JSC::ceilThunkGenerator):
(JSC::roundThunkGenerator):
(JSC::expThunkGenerator):
(JSC::logThunkGenerator):
(JSC::absThunkGenerator):
(JSC::powThunkGenerator):
* jit/ThunkGenerators.h:
* runtime/Executable.h:
(JSC::NativeExecutable::create):
* runtime/InitializeThreading.cpp:
(JSC::initializeThreadingOnce):
* runtime/JSGlobalData.cpp:
(JSC::JSGlobalData::JSGlobalData):
(JSC::JSGlobalData::dumpSampleData):
* runtime/JSGlobalData.h:
(JSC::JSGlobalData::getCTIStub):
* wtf/CMakeLists.txt:
* wtf/MetaAllocator.cpp: Added.
(WTF::MetaAllocatorHandle::MetaAllocatorHandle):
(WTF::MetaAllocatorHandle::~MetaAllocatorHandle):
(WTF::MetaAllocatorHandle::shrink):
(WTF::MetaAllocator::MetaAllocator):
(WTF::MetaAllocator::allocate):
(WTF::MetaAllocator::currentStatistics):
(WTF::MetaAllocator::findAndRemoveFreeSpace):
(WTF::MetaAllocator::addFreeSpaceFromReleasedHandle):
(WTF::MetaAllocator::addFreshFreeSpace):
(WTF::MetaAllocator::debugFreeSpaceSize):
(WTF::MetaAllocator::addFreeSpace):
(WTF::MetaAllocator::incrementPageOccupancy):
(WTF::MetaAllocator::decrementPageOccupancy):
(WTF::MetaAllocator::roundUp):
(WTF::MetaAllocator::allocFreeSpaceNode):
(WTF::MetaAllocator::freeFreeSpaceNode):
(WTF::MetaAllocator::dumpProfile):
* wtf/MetaAllocator.h: Added.
(WTF::MetaAllocator::bytesAllocated):
(WTF::MetaAllocator::bytesReserved):
(WTF::MetaAllocator::bytesCommitted):
(WTF::MetaAllocator::dumpProfile):
(WTF::MetaAllocator::~MetaAllocator):
* wtf/MetaAllocatorHandle.h: Added.
* wtf/RedBlackTree.h: Added.
(WTF::RedBlackTree::Node::Node):
(WTF::RedBlackTree::Node::successor):
(WTF::RedBlackTree::Node::predecessor):
(WTF::RedBlackTree::Node::reset):
(WTF::RedBlackTree::Node::parent):
(WTF::RedBlackTree::Node::setParent):
(WTF::RedBlackTree::Node::left):
(WTF::RedBlackTree::Node::setLeft):
(WTF::RedBlackTree::Node::right):
(WTF::RedBlackTree::Node::setRight):
(WTF::RedBlackTree::Node::color):
(WTF::RedBlackTree::Node::setColor):
(WTF::RedBlackTree::RedBlackTree):
(WTF::RedBlackTree::insert):
(WTF::RedBlackTree::remove):
(WTF::RedBlackTree::findExact):
(WTF::RedBlackTree::findLeastGreaterThanOrEqual):
(WTF::RedBlackTree::findGreatestLessThanOrEqual):
(WTF::RedBlackTree::first):
(WTF::RedBlackTree::last):
(WTF::RedBlackTree::size):
(WTF::RedBlackTree::isEmpty):
(WTF::RedBlackTree::treeMinimum):
(WTF::RedBlackTree::treeMaximum):
(WTF::RedBlackTree::treeInsert):
(WTF::RedBlackTree::leftRotate):
(WTF::RedBlackTree::rightRotate):
(WTF::RedBlackTree::removeFixup):
* wtf/wtf.pri:
* yarr/YarrJIT.cpp:
(JSC::Yarr::YarrGenerator::compile):
* yarr/YarrJIT.h:
(JSC::Yarr::YarrCodeBlock::execute):
(JSC::Yarr::YarrCodeBlock::getAddr):

Source/JavaScriptGlue:

* ForwardingHeaders/wtf/MetaAllocatorHandle.h: Added.

Source/WebCore:

No new layout tests because behavior is not changed.  New API unit
tests:
Tests/WTF/RedBlackTree.cpp
Tests/WTF/MetaAllocator.cpp
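
The new unit tests cover the allocator and the tree. Central to the change
itself is the lifetime rule that makes individual chunks freeable: memory is
handed out through a reference-counted handle and returns to the free pool
when the last reference is dropped. The sketch below is a hypothetical
illustration of that rule only; std::shared_ptr stands in for WTF's
ref-counting, and ToyAllocator is not the real MetaAllocator API.

    // Hypothetical illustration of the reference-counted handle idea:
    // the handle's destructor returns its chunk's bytes to the allocator,
    // so a chunk is freed as soon as its last user lets go of it.
    #include <cstddef>
    #include <cstdio>
    #include <memory>

    struct ToyAllocator {
        size_t bytesAllocated = 0;

        struct Handle {
            Handle(ToyAllocator* a, size_t size) : allocator(a), sizeInBytes(size) { }
            ~Handle() { allocator->bytesAllocated -= sizeInBytes; } // "free" on last ref
            ToyAllocator* allocator;
            size_t sizeInBytes;
        };

        std::shared_ptr<Handle> allocate(size_t size)
        {
            bytesAllocated += size;
            return std::make_shared<Handle>(this, size);
        }
    };

    int main()
    {
        ToyAllocator allocator;
        {
            std::shared_ptr<ToyAllocator::Handle> code = allocator.allocate(128); // e.g. held by a LinkBuffer
            std::shared_ptr<ToyAllocator::Handle> alias = code;                   // e.g. also held by a code ref
            std::printf("allocated: %zu\n", allocator.bytesAllocated);            // prints 128
        } // last reference dies here; the chunk is returned
        std::printf("allocated: %zu\n", allocator.bytesAllocated);                // prints 0
        return 0;
    }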

* ForwardingHeaders/wtf/MetaAllocatorHandle.h: Added.

Tools:

* TestWebKitAPI/TestWebKitAPI.xcodeproj/project.pbxproj:
* TestWebKitAPI/Tests/WTF/MetaAllocator.cpp: Added.
(TestWebKitAPI::TEST_F):
* TestWebKitAPI/Tests/WTF/RedBlackTree.cpp: Added.
(TestWebKitAPI::Pair::findExact):
(TestWebKitAPI::Pair::remove):
(TestWebKitAPI::Pair::findLeastGreaterThanOrEqual):
(TestWebKitAPI::Pair::assertFoundAndRemove):
(TestWebKitAPI::Pair::assertEqual):
(TestWebKitAPI::Pair::assertSameValuesForKey):
(TestWebKitAPI::Pair::testDriver):
(TestWebKitAPI::TEST_F):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@94920 268f45cc-cd09-0410-ab3c-d52691b4dbfc

49 files changed:
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/GNUmakefile.list.am
Source/JavaScriptCore/JavaScriptCore.exp
Source/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj
Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
Source/JavaScriptCore/assembler/AssemblerBuffer.h
Source/JavaScriptCore/assembler/LinkBuffer.h
Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
Source/JavaScriptCore/assembler/X86Assembler.h
Source/JavaScriptCore/bytecode/CodeBlock.h
Source/JavaScriptCore/bytecode/Instruction.h
Source/JavaScriptCore/bytecode/StructureStubInfo.h
Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
Source/JavaScriptCore/dfg/DFGRepatch.cpp
Source/JavaScriptCore/jit/ExecutableAllocator.cpp
Source/JavaScriptCore/jit/ExecutableAllocator.h
Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
Source/JavaScriptCore/jit/JIT.cpp
Source/JavaScriptCore/jit/JIT.h
Source/JavaScriptCore/jit/JITCode.h
Source/JavaScriptCore/jit/JITOpcodes.cpp
Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
Source/JavaScriptCore/jit/JITPropertyAccess.cpp
Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
Source/JavaScriptCore/jit/JITStubs.cpp
Source/JavaScriptCore/jit/JITStubs.h
Source/JavaScriptCore/jit/SpecializedThunkJIT.h
Source/JavaScriptCore/jit/ThunkGenerators.cpp
Source/JavaScriptCore/jit/ThunkGenerators.h
Source/JavaScriptCore/runtime/Executable.h
Source/JavaScriptCore/runtime/InitializeThreading.cpp
Source/JavaScriptCore/runtime/JSGlobalData.cpp
Source/JavaScriptCore/runtime/JSGlobalData.h
Source/JavaScriptCore/wtf/CMakeLists.txt
Source/JavaScriptCore/wtf/MetaAllocator.cpp [new file with mode: 0644]
Source/JavaScriptCore/wtf/MetaAllocator.h [new file with mode: 0644]
Source/JavaScriptCore/wtf/MetaAllocatorHandle.h [new file with mode: 0644]
Source/JavaScriptCore/wtf/RedBlackTree.h [new file with mode: 0644]
Source/JavaScriptCore/wtf/wtf.pri
Source/JavaScriptCore/yarr/YarrJIT.cpp
Source/JavaScriptCore/yarr/YarrJIT.h
Source/JavaScriptGlue/ChangeLog
Source/JavaScriptGlue/ForwardingHeaders/wtf/MetaAllocatorHandle.h [new file with mode: 0644]
Source/WebCore/ChangeLog
Source/WebCore/ForwardingHeaders/wtf/MetaAllocatorHandle.h [new file with mode: 0644]
Tools/ChangeLog
Tools/TestWebKitAPI/TestWebKitAPI.xcodeproj/project.pbxproj
Tools/TestWebKitAPI/Tests/WTF/MetaAllocator.cpp [new file with mode: 0644]
Tools/TestWebKitAPI/Tests/WTF/RedBlackTree.cpp [new file with mode: 0644]

index b7f9709..66291d8 100644 (file)
@@ -1,3 +1,201 @@
+2011-09-08  Filip Pizlo  <fpizlo@apple.com>
+
+        The executable allocator makes it difficult to free individual
+        chunks of executable memory
+        https://bugs.webkit.org/show_bug.cgi?id=66363
+
+        Reviewed by Oliver Hunt.
+        
+        Introduced a best-fit, balanced-tree based allocator. The allocator
+        required a balanced tree that does not allocate memory and that
+        permits the removal of individual nodes directly (as opposed to by
+        key); neither AVLTree nor WebCore's PODRedBlackTree supported this.
+        Changed all references to executable code to use a reference counted
+        handle.
+
+        * GNUmakefile.list.am:
+        * JavaScriptCore.exp:
+        * JavaScriptCore.vcproj/WTF/WTF.vcproj:
+        * JavaScriptCore.xcodeproj/project.pbxproj:
+        * assembler/AssemblerBuffer.h:
+        (JSC::AssemblerBuffer::executableCopy):
+        * assembler/LinkBuffer.h:
+        (JSC::LinkBuffer::LinkBuffer):
+        (JSC::LinkBuffer::finalizeCode):
+        (JSC::LinkBuffer::linkCode):
+        * assembler/MacroAssemblerCodeRef.h:
+        (JSC::MacroAssemblerCodeRef::MacroAssemblerCodeRef):
+        (JSC::MacroAssemblerCodeRef::createSelfManagedCodeRef):
+        (JSC::MacroAssemblerCodeRef::executableMemory):
+        (JSC::MacroAssemblerCodeRef::code):
+        (JSC::MacroAssemblerCodeRef::size):
+        (JSC::MacroAssemblerCodeRef::operator!):
+        * assembler/X86Assembler.h:
+        (JSC::X86Assembler::executableCopy):
+        (JSC::X86Assembler::X86InstructionFormatter::executableCopy):
+        * bytecode/CodeBlock.h:
+        * bytecode/Instruction.h:
+        * bytecode/StructureStubInfo.h:
+        * dfg/DFGJITCompiler.cpp:
+        (JSC::DFG::JITCompiler::compile):
+        (JSC::DFG::JITCompiler::compileFunction):
+        * dfg/DFGRepatch.cpp:
+        (JSC::DFG::generateProtoChainAccessStub):
+        (JSC::DFG::tryCacheGetByID):
+        (JSC::DFG::tryBuildGetByIDList):
+        (JSC::DFG::tryBuildGetByIDProtoList):
+        (JSC::DFG::tryCachePutByID):
+        * jit/ExecutableAllocator.cpp:
+        (JSC::ExecutableAllocator::initializeAllocator):
+        (JSC::ExecutableAllocator::ExecutableAllocator):
+        (JSC::ExecutableAllocator::allocate):
+        (JSC::ExecutableAllocator::committedByteCount):
+        (JSC::ExecutableAllocator::dumpProfile):
+        * jit/ExecutableAllocator.h:
+        (JSC::ExecutableAllocator::dumpProfile):
+        * jit/ExecutableAllocatorFixedVMPool.cpp:
+        (JSC::ExecutableAllocator::initializeAllocator):
+        (JSC::ExecutableAllocator::ExecutableAllocator):
+        (JSC::ExecutableAllocator::isValid):
+        (JSC::ExecutableAllocator::underMemoryPressure):
+        (JSC::ExecutableAllocator::allocate):
+        (JSC::ExecutableAllocator::committedByteCount):
+        (JSC::ExecutableAllocator::dumpProfile):
+        * jit/JIT.cpp:
+        (JSC::JIT::privateCompile):
+        * jit/JIT.h:
+        (JSC::JIT::compileCTIMachineTrampolines):
+        (JSC::JIT::compileCTINativeCall):
+        * jit/JITCode.h:
+        (JSC::JITCode::operator !):
+        (JSC::JITCode::addressForCall):
+        (JSC::JITCode::offsetOf):
+        (JSC::JITCode::execute):
+        (JSC::JITCode::start):
+        (JSC::JITCode::size):
+        (JSC::JITCode::getExecutableMemory):
+        (JSC::JITCode::HostFunction):
+        (JSC::JITCode::JITCode):
+        * jit/JITOpcodes.cpp:
+        (JSC::JIT::privateCompileCTIMachineTrampolines):
+        (JSC::JIT::privateCompileCTINativeCall):
+        * jit/JITOpcodes32_64.cpp:
+        (JSC::JIT::privateCompileCTIMachineTrampolines):
+        (JSC::JIT::privateCompileCTINativeCall):
+        * jit/JITPropertyAccess.cpp:
+        (JSC::JIT::stringGetByValStubGenerator):
+        (JSC::JIT::emitSlow_op_get_by_val):
+        (JSC::JIT::privateCompilePutByIdTransition):
+        (JSC::JIT::privateCompilePatchGetArrayLength):
+        (JSC::JIT::privateCompileGetByIdProto):
+        (JSC::JIT::privateCompileGetByIdSelfList):
+        (JSC::JIT::privateCompileGetByIdProtoList):
+        (JSC::JIT::privateCompileGetByIdChainList):
+        (JSC::JIT::privateCompileGetByIdChain):
+        * jit/JITPropertyAccess32_64.cpp:
+        (JSC::JIT::stringGetByValStubGenerator):
+        (JSC::JIT::emitSlow_op_get_by_val):
+        (JSC::JIT::privateCompilePutByIdTransition):
+        (JSC::JIT::privateCompilePatchGetArrayLength):
+        (JSC::JIT::privateCompileGetByIdProto):
+        (JSC::JIT::privateCompileGetByIdSelfList):
+        (JSC::JIT::privateCompileGetByIdProtoList):
+        (JSC::JIT::privateCompileGetByIdChainList):
+        (JSC::JIT::privateCompileGetByIdChain):
+        * jit/JITStubs.cpp:
+        (JSC::JITThunks::JITThunks):
+        (JSC::DEFINE_STUB_FUNCTION):
+        (JSC::getPolymorphicAccessStructureListSlot):
+        (JSC::JITThunks::ctiStub):
+        (JSC::JITThunks::hostFunctionStub):
+        * jit/JITStubs.h:
+        * jit/SpecializedThunkJIT.h:
+        (JSC::SpecializedThunkJIT::SpecializedThunkJIT):
+        (JSC::SpecializedThunkJIT::finalize):
+        * jit/ThunkGenerators.cpp:
+        (JSC::charCodeAtThunkGenerator):
+        (JSC::charAtThunkGenerator):
+        (JSC::fromCharCodeThunkGenerator):
+        (JSC::sqrtThunkGenerator):
+        (JSC::floorThunkGenerator):
+        (JSC::ceilThunkGenerator):
+        (JSC::roundThunkGenerator):
+        (JSC::expThunkGenerator):
+        (JSC::logThunkGenerator):
+        (JSC::absThunkGenerator):
+        (JSC::powThunkGenerator):
+        * jit/ThunkGenerators.h:
+        * runtime/Executable.h:
+        (JSC::NativeExecutable::create):
+        * runtime/InitializeThreading.cpp:
+        (JSC::initializeThreadingOnce):
+        * runtime/JSGlobalData.cpp:
+        (JSC::JSGlobalData::JSGlobalData):
+        (JSC::JSGlobalData::dumpSampleData):
+        * runtime/JSGlobalData.h:
+        (JSC::JSGlobalData::getCTIStub):
+        * wtf/CMakeLists.txt:
+        * wtf/MetaAllocator.cpp: Added.
+        (WTF::MetaAllocatorHandle::MetaAllocatorHandle):
+        (WTF::MetaAllocatorHandle::~MetaAllocatorHandle):
+        (WTF::MetaAllocatorHandle::shrink):
+        (WTF::MetaAllocator::MetaAllocator):
+        (WTF::MetaAllocator::allocate):
+        (WTF::MetaAllocator::currentStatistics):
+        (WTF::MetaAllocator::findAndRemoveFreeSpace):
+        (WTF::MetaAllocator::addFreeSpaceFromReleasedHandle):
+        (WTF::MetaAllocator::addFreshFreeSpace):
+        (WTF::MetaAllocator::debugFreeSpaceSize):
+        (WTF::MetaAllocator::addFreeSpace):
+        (WTF::MetaAllocator::incrementPageOccupancy):
+        (WTF::MetaAllocator::decrementPageOccupancy):
+        (WTF::MetaAllocator::roundUp):
+        (WTF::MetaAllocator::allocFreeSpaceNode):
+        (WTF::MetaAllocator::freeFreeSpaceNode):
+        (WTF::MetaAllocator::dumpProfile):
+        * wtf/MetaAllocator.h: Added.
+        (WTF::MetaAllocator::bytesAllocated):
+        (WTF::MetaAllocator::bytesReserved):
+        (WTF::MetaAllocator::bytesCommitted):
+        (WTF::MetaAllocator::dumpProfile):
+        (WTF::MetaAllocator::~MetaAllocator):
+        * wtf/MetaAllocatorHandle.h: Added.
+        * wtf/RedBlackTree.h: Added.
+        (WTF::RedBlackTree::Node::Node):
+        (WTF::RedBlackTree::Node::successor):
+        (WTF::RedBlackTree::Node::predecessor):
+        (WTF::RedBlackTree::Node::reset):
+        (WTF::RedBlackTree::Node::parent):
+        (WTF::RedBlackTree::Node::setParent):
+        (WTF::RedBlackTree::Node::left):
+        (WTF::RedBlackTree::Node::setLeft):
+        (WTF::RedBlackTree::Node::right):
+        (WTF::RedBlackTree::Node::setRight):
+        (WTF::RedBlackTree::Node::color):
+        (WTF::RedBlackTree::Node::setColor):
+        (WTF::RedBlackTree::RedBlackTree):
+        (WTF::RedBlackTree::insert):
+        (WTF::RedBlackTree::remove):
+        (WTF::RedBlackTree::findExact):
+        (WTF::RedBlackTree::findLeastGreaterThanOrEqual):
+        (WTF::RedBlackTree::findGreatestLessThanOrEqual):
+        (WTF::RedBlackTree::first):
+        (WTF::RedBlackTree::last):
+        (WTF::RedBlackTree::size):
+        (WTF::RedBlackTree::isEmpty):
+        (WTF::RedBlackTree::treeMinimum):
+        (WTF::RedBlackTree::treeMaximum):
+        (WTF::RedBlackTree::treeInsert):
+        (WTF::RedBlackTree::leftRotate):
+        (WTF::RedBlackTree::rightRotate):
+        (WTF::RedBlackTree::removeFixup):
+        * wtf/wtf.pri:
+        * yarr/YarrJIT.cpp:
+        (JSC::Yarr::YarrGenerator::compile):
+        * yarr/YarrJIT.h:
+        (JSC::Yarr::YarrCodeBlock::execute):
+        (JSC::Yarr::YarrCodeBlock::getAddr):
+
 2011-09-10  Sam Weinig  <sam@webkit.org>
 
         Remove JSC::isZombie() function, it did nothing and was called by no-one.
index 8b1834b..7e7bf4a 100644 (file)
@@ -529,6 +529,9 @@ javascriptcore_sources += \
        Source/JavaScriptCore/wtf/MathExtras.h \
        Source/JavaScriptCore/wtf/MD5.cpp \
        Source/JavaScriptCore/wtf/MD5.h \
+       Source/JavaScriptCore/wtf/MetaAllocator.cpp \
+       Source/JavaScriptCore/wtf/MetaAllocator.h \
+       Source/JavaScriptCore/wtf/MetaAllocatorHandle.h \
        Source/JavaScriptCore/wtf/MessageQueue.h \
        Source/JavaScriptCore/wtf/NonCopyingSort.h \
        Source/JavaScriptCore/wtf/Noncopyable.h \
@@ -561,6 +564,7 @@ javascriptcore_sources += \
        Source/JavaScriptCore/wtf/RandomNumber.cpp \
        Source/JavaScriptCore/wtf/RandomNumber.h \
        Source/JavaScriptCore/wtf/RandomNumberSeed.h \
+       Source/JavaScriptCore/wtf/RedBlackTree.h \
        Source/JavaScriptCore/wtf/RefCounted.h \
        Source/JavaScriptCore/wtf/RefCountedLeakCounter.cpp \
        Source/JavaScriptCore/wtf/RefCountedLeakCounter.h \
index 6bbb75b..a6b2b58 100644 (file)
@@ -407,6 +407,11 @@ __ZN3WTF12createThreadEPFPvS0_ES0_PKc
 __ZN3WTF12detachThreadEj
 __ZN3WTF12isMainThreadEv
 __ZN3WTF12randomNumberEv
+__ZN3WTF13MetaAllocator17addFreshFreeSpaceEPvm
+__ZN3WTF13MetaAllocator17freeFreeSpaceNodeEPNS_12RedBlackTreeImPvE4NodeE
+__ZN3WTF13MetaAllocator18debugFreeSpaceSizeEv
+__ZN3WTF13MetaAllocator8allocateEm
+__ZN3WTF13MetaAllocatorC2Em
 __ZN3WTF13StringBuilder11reifyStringEv
 __ZN3WTF13StringBuilder11shrinkToFitEv
 __ZN3WTF13StringBuilder15reserveCapacityEj
@@ -441,6 +446,8 @@ __ZN3WTF18calculateUTCOffsetEv
 __ZN3WTF18charactersToDoubleEPKtmPbS2_
 __ZN3WTF18dateToDaysFrom1970Eiii
 __ZN3WTF18monthFromDayInYearEib
+__ZN3WTF19MetaAllocatorHandle6shrinkEm
+__ZN3WTF19MetaAllocatorHandleD1Ev
 __ZN3WTF19initializeThreadingEv
 __ZN3WTF20equalIgnoringNullityEPNS_10StringImplES1_
 __ZN3WTF20fastMallocStatisticsEv
@@ -521,6 +528,7 @@ __ZN3WTF8Internal21fastMallocMatchFailedEPv
 __ZN3WTF8fastFreeEPv
 __ZN3WTF8msToYearEd
 __ZN3WTF8nullAtomE
+__ZN3WTF8pageSizeEv
 __ZN3WTF8starAtomE
 __ZN3WTF8textAtomE
 __ZN3WTF9ByteArray6createEm
index 2c15a89..eec7cde 100644 (file)
                        >
                </File>
                <File
+                       RelativePath="..\..\wtf\MetaAllocator.cpp"
+                       >
+               </File>
+               <File
+                       RelativePath="..\..\wtf\MetaAllocator.h"
+                       >
+               </File>
+               <File
+                       RelativePath="..\..\wtf\MetaAllocatorHandle.h"
+                       >
+               </File>
+               <File
                        RelativePath="..\..\wtf\MD5.cpp"
                        >
                </File>
                        >
                </File>
                <File
+                       RelativePath="..\..\wtf\RedBlackTree.h"
+                       >
+               </File>
+               <File
                        RelativePath="..\..\wtf\RefCounted.h"
                        >
                </File>
index 27cbe1e..a0cce6a 100644 (file)
                0BF28A2911A33DC300638F84 /* SizeLimits.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0BF28A2811A33DC300638F84 /* SizeLimits.cpp */; };
                0F242DA713F3B1E8007ADD4C /* WeakReferenceHarvester.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F242DA513F3B1BB007ADD4C /* WeakReferenceHarvester.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F7700921402FF3C0078EB39 /* SamplingCounter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F7700911402FF280078EB39 /* SamplingCounter.cpp */; };
+               0F963B2713F753BB0002D9B2 /* RedBlackTree.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F963B2613F753990002D9B2 /* RedBlackTree.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               0F963B2C13F853EC0002D9B2 /* MetaAllocator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F963B2B13F853C70002D9B2 /* MetaAllocator.cpp */; settings = {COMPILER_FLAGS = "-fno-strict-aliasing"; }; };
+               0F963B2D13F854020002D9B2 /* MetaAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F963B2A13F853BD0002D9B2 /* MetaAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               0F963B2F13FC66BB0002D9B2 /* MetaAllocatorHandle.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F963B2E13FC66AE0002D9B2 /* MetaAllocatorHandle.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F963B3813FC6FE90002D9B2 /* ValueProfile.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F963B3613FC6FDE0002D9B2 /* ValueProfile.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0FC8150A14043BF500CFA603 /* WriteBarrierSupport.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FC8150914043BD200CFA603 /* WriteBarrierSupport.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0FC8150B14043C0E00CFA603 /* WriteBarrierSupport.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FC8150814043BCA00CFA603 /* WriteBarrierSupport.cpp */; };
                BC18C46B0E16F5CD00B34460 /* SymbolTable.h in Headers */ = {isa = PBXBuildFile; fileRef = 14A396A60CD2933100B5B4FF /* SymbolTable.h */; settings = {ATTRIBUTES = (Private, ); }; };
                BC18C46C0E16F5CD00B34460 /* TCPackedCache.h in Headers */ = {isa = PBXBuildFile; fileRef = 5DA479650CFBCF56009328A0 /* TCPackedCache.h */; };
                BC18C46D0E16F5CD00B34460 /* TCPageMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 6541BD6E08E80A17002CBEE7 /* TCPageMap.h */; };
-               BC18C46E0E16F5CD00B34460 /* TCSpinLock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6541BD6F08E80A17002CBEE7 /* TCSpinLock.h */; };
+               BC18C46E0E16F5CD00B34460 /* TCSpinLock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6541BD6F08E80A17002CBEE7 /* TCSpinLock.h */; settings = {ATTRIBUTES = (Private, ); }; };
                BC18C46F0E16F5CD00B34460 /* TCSystemAlloc.h in Headers */ = {isa = PBXBuildFile; fileRef = 6541BD7108E80A17002CBEE7 /* TCSystemAlloc.h */; };
                BC18C4700E16F5CD00B34460 /* Threading.h in Headers */ = {isa = PBXBuildFile; fileRef = E1EE79220D6C95CD00FEA3BA /* Threading.h */; settings = {ATTRIBUTES = (Private, ); }; };
                BC18C4710E16F5CD00B34460 /* ThreadSpecific.h in Headers */ = {isa = PBXBuildFile; fileRef = E1B7C8BD0DA3A3360074B0DC /* ThreadSpecific.h */; settings = {ATTRIBUTES = (Private, ); }; };
                0F242DA513F3B1BB007ADD4C /* WeakReferenceHarvester.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WeakReferenceHarvester.h; sourceTree = "<group>"; };
                0F77008E1402FDD60078EB39 /* SamplingCounter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SamplingCounter.h; sourceTree = "<group>"; };
                0F7700911402FF280078EB39 /* SamplingCounter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SamplingCounter.cpp; sourceTree = "<group>"; };
+               0F963B2613F753990002D9B2 /* RedBlackTree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RedBlackTree.h; sourceTree = "<group>"; };
+               0F963B2A13F853BD0002D9B2 /* MetaAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MetaAllocator.h; sourceTree = "<group>"; };
+               0F963B2B13F853C70002D9B2 /* MetaAllocator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MetaAllocator.cpp; sourceTree = "<group>"; };
+               0F963B2E13FC66AE0002D9B2 /* MetaAllocatorHandle.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MetaAllocatorHandle.h; sourceTree = "<group>"; };
                0F963B3613FC6FDE0002D9B2 /* ValueProfile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ValueProfile.h; sourceTree = "<group>"; };
                0FC8150814043BCA00CFA603 /* WriteBarrierSupport.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WriteBarrierSupport.cpp; sourceTree = "<group>"; };
                0FC8150914043BD200CFA603 /* WriteBarrierSupport.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WriteBarrierSupport.h; sourceTree = "<group>"; };
                65162EF108E6A21C007556CD /* wtf */ = {
                        isa = PBXGroup;
                        children = (
+                               0F963B2E13FC66AE0002D9B2 /* MetaAllocatorHandle.h */,
+                               0F963B2B13F853C70002D9B2 /* MetaAllocator.cpp */,
+                               0F963B2A13F853BD0002D9B2 /* MetaAllocator.h */,
+                               0F963B2613F753990002D9B2 /* RedBlackTree.h */,
                                C2EE599D13FC972A009CEAFE /* DecimalNumber.cpp */,
                                C2EE599E13FC972A009CEAFE /* DecimalNumber.h */,
                                C22C524813FAF6EF00B7DC0D /* dtoa */,
                                86D3B2C410156BDE002865E7 /* ARMAssembler.h in Headers */,
                                86ADD1450FDDEA980006EEC2 /* ARMv7Assembler.h in Headers */,
                                BC18C3E60E16F5CD00B34460 /* ArrayConstructor.h in Headers */,
+                               BC18C46E0E16F5CD00B34460 /* TCSpinLock.h in Headers */,
+                               0F963B2F13FC66BB0002D9B2 /* MetaAllocatorHandle.h in Headers */,
+                               0F963B2D13F854020002D9B2 /* MetaAllocator.h in Headers */,
+                               0F963B2713F753BB0002D9B2 /* RedBlackTree.h in Headers */,
                                0FC815151405119B00CFA603 /* VTableSpectrum.h in Headers */,
                                C22B31B9140577D700DB475A /* SamplingCounter.h in Headers */,
                                0FC8150A14043BF500CFA603 /* WriteBarrierSupport.h in Headers */,
                                A784A26411D16622005776AC /* SyntaxChecker.h in Headers */,
                                BC18C46C0E16F5CD00B34460 /* TCPackedCache.h in Headers */,
                                BC18C46D0E16F5CD00B34460 /* TCPageMap.h in Headers */,
-                               BC18C46E0E16F5CD00B34460 /* TCSpinLock.h in Headers */,
                                BC18C46F0E16F5CD00B34460 /* TCSystemAlloc.h in Headers */,
                                971EDEA61169E0D3005E4262 /* Terminator.h in Headers */,
                                F3BD31ED126735770065467F /* TextPosition.h in Headers */,
                        isa = PBXSourcesBuildPhase;
                        buildActionMask = 2147483647;
                        files = (
+                               0F963B2C13F853EC0002D9B2 /* MetaAllocator.cpp in Sources */,
                                0FD82E2114172CE300179C94 /* DFGCapabilities.cpp in Sources */,
                                0FD3C82514115D4000FD81CB /* DFGPropagator.cpp in Sources */,
                                0FD3C82614115D4000FD81CB /* DFGDriver.cpp in Sources */,
index b98503d..05a9307 100644 (file)
@@ -28,6 +28,7 @@
 
 #if ENABLE(ASSEMBLER)
 
+#include "JSGlobalData.h"
 #include "stdint.h"
 #include <string.h>
 #include <jit/ExecutableAllocator.h>
@@ -128,19 +129,21 @@ namespace JSC {
             return AssemblerLabel(m_index);
         }
 
-        void* executableCopy(JSGlobalData& globalData, ExecutablePool* allocator)
+        PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData)
         {
             if (!m_index)
                 return 0;
 
-            void* result = allocator->alloc(globalData, m_index);
+            RefPtr<ExecutableMemoryHandle> result = globalData.executableAllocator.allocate(globalData, m_index);
 
             if (!result)
                 return 0;
 
-            ExecutableAllocator::makeWritable(result, m_index);
+            ExecutableAllocator::makeWritable(result->start(), result->sizeInBytes());
 
-            return memcpy(result, m_buffer, m_index);
+            memcpy(result->start(), m_buffer, m_index);
+            
+            return result.release();
         }
 
         void rewindToLabel(AssemblerLabel label)
index 0255b29..274f1b6 100644 (file)
@@ -69,22 +69,8 @@ class LinkBuffer {
 #endif
 
 public:
-    LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, PassRefPtr<ExecutablePool> executablePool)
-        : m_executablePool(executablePool)
-        , m_size(0)
-        , m_code(0)
-        , m_assembler(masm)
-        , m_globalData(&globalData)
-#ifndef NDEBUG
-        , m_completed(false)
-#endif
-    {
-        linkCode();
-    }
-
-    LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, ExecutableAllocator& allocator)
-        : m_executablePool(allocator.poolForSize(globalData, masm->m_assembler.codeSize()))
-        , m_size(0)
+    LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm)
+        : m_size(0)
         , m_code(0)
         , m_assembler(masm)
         , m_globalData(&globalData)
@@ -185,14 +171,7 @@ public:
     {
         performFinalization();
 
-        return CodeRef(m_code, m_executablePool, m_size);
-    }
-
-    CodeLocationLabel finalizeCodeAddendum()
-    {
-        performFinalization();
-
-        return CodeLocationLabel(code());
+        return CodeRef(m_executableMemory);
     }
 
     CodePtr trampolineAt(Label label)
@@ -232,14 +211,19 @@ private:
     {
         ASSERT(!m_code);
 #if !ENABLE(BRANCH_COMPACTION)
-        m_code = m_assembler->m_assembler.executableCopy(*m_globalData, m_executablePool.get());
+        m_executableMemory = m_assembler->m_assembler.executableCopy(*m_globalData);
+        if (!m_executableMemory)
+            return;
+        m_code = m_executableMemory->start();
         m_size = m_assembler->m_assembler.codeSize();
         ASSERT(m_code);
 #else
         size_t initialSize = m_assembler->m_assembler.codeSize();
-        m_code = (uint8_t*)m_executablePool->alloc(*m_globalData, initialSize);
-        if (!m_code)
+        m_executableMemory = m_globalData->executableAllocator.allocate(*m_globalData, initialSize);
+        if (!m_executableMemory)
             return;
+        m_code = (uint8_t*)m_executableMemory->start();
+        ASSERT(m_code);
         ExecutableAllocator::makeWritable(m_code, initialSize);
         uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
         uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
@@ -297,7 +281,7 @@ private:
 
         jumpsToLink.clear();
         m_size = writePtr + initialSize - readPtr;
-        m_executablePool->tryShrink(m_code, initialSize, m_size);
+        m_executableMemory->shrink(m_size);
 
 #if DUMP_LINK_STATISTICS
         dumpLinkStatistics(m_code, initialSize, m_size);
@@ -366,7 +350,7 @@ private:
     }
 #endif
     
-    RefPtr<ExecutablePool> m_executablePool;
+    RefPtr<ExecutableMemoryHandle> m_executableMemory;
     size_t m_size;
     void* m_code;
     MacroAssembler* m_assembler;
index 6d47cb9..806ba3d 100644 (file)
@@ -199,22 +199,59 @@ private:
 // pointer to the code, and a ref pointer to the pool from within which it
 // was allocated.
 class MacroAssemblerCodeRef {
+private:
+    // This is private because it's dangerous enough that we want uses of it
+    // to be easy to find - hence the static create method below.
+    explicit MacroAssemblerCodeRef(MacroAssemblerCodePtr codePtr)
+        : m_codePtr(codePtr)
+    {
+        ASSERT(m_codePtr);
+    }
+
 public:
     MacroAssemblerCodeRef()
-        : m_size(0)
     {
     }
 
-    MacroAssemblerCodeRef(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size)
-        : m_code(code)
-        , m_executablePool(executablePool)
-        , m_size(size)
+    MacroAssemblerCodeRef(PassRefPtr<ExecutableMemoryHandle> executableMemory)
+        : m_codePtr(executableMemory->start())
+        , m_executableMemory(executableMemory)
     {
+        ASSERT(m_executableMemory->isManaged());
+        ASSERT(m_executableMemory->start());
+        ASSERT(m_codePtr);
     }
+    
+    // Use this only when you know that the codePtr refers to code that is
+    // already being kept alive through some other means. Typically this means
+    // that codePtr is immortal.
+    static MacroAssemblerCodeRef createSelfManagedCodeRef(MacroAssemblerCodePtr codePtr)
+    {
+        return MacroAssemblerCodeRef(codePtr);
+    }
+    
+    ExecutableMemoryHandle* executableMemory() const
+    {
+        return m_executableMemory.get();
+    }
+    
+    MacroAssemblerCodePtr code() const
+    {
+        return m_codePtr;
+    }
+    
+    size_t size() const
+    {
+        if (!m_executableMemory)
+            return 0;
+        return m_executableMemory->sizeInBytes();
+    }
+    
+    bool operator!() const { return !m_codePtr; }
 
-    MacroAssemblerCodePtr m_code;
-    RefPtr<ExecutablePool> m_executablePool;
-    size_t m_size;
+private:
+    MacroAssemblerCodePtr m_codePtr;
+    RefPtr<ExecutableMemoryHandle> m_executableMemory;
 };
 
 } // namespace JSC
index c06a131..3f5c0ad 100644 (file)
@@ -1595,9 +1595,9 @@ public:
         return b.m_offset - a.m_offset;
     }
     
-    void* executableCopy(JSGlobalData& globalData, ExecutablePool* allocator)
+    PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData)
     {
-        return m_formatter.executableCopy(globalData, allocator);
+        return m_formatter.executableCopy(globalData);
     }
 
     void rewindToLabel(AssemblerLabel rewindTo) { m_formatter.rewindToLabel(rewindTo); }
@@ -1939,9 +1939,9 @@ private:
         bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
         void* data() const { return m_buffer.data(); }
 
-        void* executableCopy(JSGlobalData& globalData, ExecutablePool* allocator)
+        PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData)
         {
-            return m_buffer.executableCopy(globalData, allocator);
+            return m_buffer.executableCopy(globalData);
         }
 
         void rewindToLabel(AssemblerLabel rewindTo) { m_buffer.rewindToLabel(rewindTo); }
index e5df46b..068cc69 100644 (file)
@@ -324,7 +324,7 @@ namespace JSC {
         }
         JITCode& getJITCode() { return m_jitCode; }
         JITCode::JITType getJITType() { return m_jitCode.jitType(); }
-        ExecutablePool* executablePool() { return getJITCode().getExecutablePool(); }
+        ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
         virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
         virtual CodeBlock* replacement() = 0;
         virtual bool canCompileWithDFG() = 0;
index 71666e2..c2d02fa 100644 (file)
@@ -50,7 +50,7 @@ namespace JSC {
     class StructureChain;
 
 #if ENABLE(JIT)
-    typedef CodeLocationLabel PolymorphicAccessStructureListStubRoutineType;
+    typedef MacroAssemblerCodeRef PolymorphicAccessStructureListStubRoutineType;
 
     // Structure used by op_get_by_id_self_list and op_get_by_id_proto_list instruction to hold data off the main opcode stream.
     struct PolymorphicAccessStructureList {
index 3d4529f..fe9fcbf 100644 (file)
@@ -173,7 +173,7 @@ namespace JSC {
             } putByIdReplace;
         } u;
 
-        CodeLocationLabel stubRoutine;
+        MacroAssemblerCodeRef stubRoutine;
         CodeLocationCall callReturnLocation;
         CodeLocationLabel hotPathBegin;
     };
index 79d5bd0..73af239 100644 (file)
@@ -955,7 +955,7 @@ void JITCompiler::compile(JITCode& entry)
     // Generate the body of the program.
     compileBody();
     // Link
-    LinkBuffer linkBuffer(*m_globalData, this, m_globalData->executableAllocator);
+    LinkBuffer linkBuffer(*m_globalData, this);
     link(linkBuffer);
     entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
 }
@@ -1013,7 +1013,7 @@ void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWi
 
 
     // === Link ===
-    LinkBuffer linkBuffer(*m_globalData, this, m_globalData->executableAllocator);
+    LinkBuffer linkBuffer(*m_globalData, this);
     link(linkBuffer);
     
     // FIXME: switch the register file check & arity check over to DFGOpertaion style calls, not JIT stubs.
index 41dafca..d082b4c 100644 (file)
@@ -93,7 +93,7 @@ static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratc
     linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
 }
 
-static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stubInfo, StructureChain* chain, size_t count, size_t offset, Structure* structure, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel, CodeLocationLabel& stubRoutine)
+static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stubInfo, StructureChain* chain, size_t count, size_t offset, Structure* structure, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel, MacroAssemblerCodeRef& stubRoutine)
 {
     JSGlobalData* globalData = &exec->globalData();
 
@@ -133,11 +133,11 @@ static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stu
     
     emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
     
-    LinkBuffer patchBuffer(*globalData, &stubJit, exec->codeBlock()->executablePool());
+    LinkBuffer patchBuffer(*globalData, &stubJit);
     
     linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
     
-    stubRoutine = patchBuffer.finalizeCodeAddendum();
+    stubRoutine = patchBuffer.finalizeCode();
 }
 
 static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
@@ -176,15 +176,14 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
         
         emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
         
-        LinkBuffer patchBuffer(*globalData, &stubJit, codeBlock->executablePool());
+        LinkBuffer patchBuffer(*globalData, &stubJit);
         
         linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases);
         
-        CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-        stubInfo.stubRoutine = entryLabel;
+        stubInfo.stubRoutine = patchBuffer.finalizeCode();
         
         RepatchBuffer repatchBuffer(codeBlock);
-        repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), entryLabel);
+        repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code()));
         repatchBuffer.relink(stubInfo.callReturnLocation, operationGetById);
         
         return true;
@@ -231,7 +230,7 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
     generateProtoChainAccessStub(exec, stubInfo, prototypeChain, count, offset, structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase), stubInfo.stubRoutine);
     
     RepatchBuffer repatchBuffer(codeBlock);
-    repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), stubInfo.stubRoutine);
+    repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code()));
     repatchBuffer.relink(stubInfo.callReturnLocation, operationGetByIdProtoBuildList);
     
     stubInfo.initGetByIdChain(*globalData, codeBlock->ownerExecutable(), structure, prototypeChain);
@@ -324,7 +323,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
     
     if (stubInfo.accessType == access_get_by_id_self) {
         ASSERT(!stubInfo.stubRoutine);
-        polymorphicStructureList = new PolymorphicAccessStructureList(*globalData, codeBlock->ownerExecutable(), stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase), stubInfo.u.getByIdSelf.baseObjectStructure.get());
+        polymorphicStructureList = new PolymorphicAccessStructureList(*globalData, codeBlock->ownerExecutable(), MacroAssemblerCodeRef::createSelfManagedCodeRef(stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase)), stubInfo.u.getByIdSelf.baseObjectStructure.get());
         stubInfo.initGetByIdSelfList(polymorphicStructureList, 1);
     } else {
         polymorphicStructureList = stubInfo.u.getByIdSelfList.structureList;
@@ -350,21 +349,21 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
         
         MacroAssembler::Jump success = stubJit.jump();
         
-        LinkBuffer patchBuffer(*globalData, &stubJit, codeBlock->executablePool());
+        LinkBuffer patchBuffer(*globalData, &stubJit);
         
-        CodeLocationLabel lastProtoBegin = polymorphicStructureList->list[listIndex - 1].stubRoutine;
+        CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine.code());
         ASSERT(!!lastProtoBegin);
         
         patchBuffer.link(wrongStruct, lastProtoBegin);
         patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone));
         
-        CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+        MacroAssemblerCodeRef stubRoutine = patchBuffer.finalizeCode();
         
-        polymorphicStructureList->list[listIndex].set(*globalData, codeBlock->ownerExecutable(), entryLabel, structure);
+        polymorphicStructureList->list[listIndex].set(*globalData, codeBlock->ownerExecutable(), stubRoutine, structure);
         
         CodeLocationJump jumpLocation = stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck);
         RepatchBuffer repatchBuffer(codeBlock);
-        repatchBuffer.relink(jumpLocation, entryLabel);
+        repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
         
         if (listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1))
             return true;
@@ -408,7 +407,7 @@ static bool tryBuildGetByIDProtoList(ExecState* exec, JSValue baseValue, const I
     if (stubInfo.accessType == access_get_by_id_chain) {
         ASSERT(!!stubInfo.stubRoutine);
         polymorphicStructureList = new PolymorphicAccessStructureList(*globalData, codeBlock->ownerExecutable(), stubInfo.stubRoutine, stubInfo.u.getByIdChain.baseObjectStructure.get(), stubInfo.u.getByIdChain.chain.get());
-        stubInfo.stubRoutine = CodeLocationLabel();
+        stubInfo.stubRoutine = MacroAssemblerCodeRef();
         stubInfo.initGetByIdProtoList(polymorphicStructureList, 1);
     } else {
         ASSERT(stubInfo.accessType = access_get_by_id_proto_list);
@@ -419,18 +418,18 @@ static bool tryBuildGetByIDProtoList(ExecState* exec, JSValue baseValue, const I
     if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
         stubInfo.u.getByIdProtoList.listSize++;
         
-        CodeLocationLabel lastProtoBegin = polymorphicStructureList->list[listIndex - 1].stubRoutine;
+        CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine.code());
         ASSERT(!!lastProtoBegin);
 
-        CodeLocationLabel entryLabel;
+        MacroAssemblerCodeRef stubRoutine;
         
-        generateProtoChainAccessStub(exec, stubInfo, prototypeChain, count, offset, structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone), lastProtoBegin, entryLabel);
+        generateProtoChainAccessStub(exec, stubInfo, prototypeChain, count, offset, structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone), lastProtoBegin, stubRoutine);
         
-        polymorphicStructureList->list[listIndex].set(*globalData, codeBlock->ownerExecutable(), entryLabel, structure);
+        polymorphicStructureList->list[listIndex].set(*globalData, codeBlock->ownerExecutable(), stubRoutine, structure);
         
         CodeLocationJump jumpLocation = stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck);
         RepatchBuffer repatchBuffer(codeBlock);
-        repatchBuffer.relink(jumpLocation, entryLabel);
+        repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
         
         if (listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1))
             return true;
@@ -548,18 +547,17 @@ static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier
             } else
                 success = stubJit.jump();
             
-            LinkBuffer patchBuffer(*globalData, &stubJit, codeBlock->executablePool());
+            LinkBuffer patchBuffer(*globalData, &stubJit);
             patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone));
             if (needToRestoreScratch)
                 patchBuffer.link(failure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
             else
                 patchBuffer.link(failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
             
-            CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-            stubInfo.stubRoutine = entryLabel;
+            stubInfo.stubRoutine = patchBuffer.finalizeCode();
             
             RepatchBuffer repatchBuffer(codeBlock);
-            repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), entryLabel);
+            repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code()));
             repatchBuffer.relink(stubInfo.callReturnLocation, appropriatePutByIdFunction(slot, putKind));
             
             stubInfo.initPutByIdTransition(*globalData, codeBlock->ownerExecutable(), oldStructure, structure, prototypeChain);
index ab6f1fd..8d88822 100644 (file)
 
 #include "ExecutableAllocator.h"
 
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+#include <wtf/MetaAllocator.h>
+#include <wtf/PageReservation.h>
+#include <wtf/VMTags.h>
+#endif
+
 #if ENABLE(ASSEMBLER)
 
+using namespace WTF;
+
 namespace JSC {
 
 #if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
 
-ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
+class DemandExecutableAllocator: public MetaAllocator {
+public:
+    DemandExecutableAllocator()
+        : MetaAllocator(32) // round up all allocations to 32 bytes
+    {
+        // Don't preallocate any memory here.
+    }
+    
+    virtual ~DemandExecutableAllocator()
+    {
+        for (unsigned i = 0; i < reservations.size(); ++i)
+            reservations.at(i).deallocate();
+    }
+
+protected:
+    virtual void* allocateNewSpace(size_t& numPages)
+    {
+        size_t newNumPages = (((numPages * pageSize() + JIT_ALLOCATOR_LARGE_ALLOC_SIZE - 1) / JIT_ALLOCATOR_LARGE_ALLOC_SIZE * JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + pageSize() - 1) / pageSize();
+        
+        ASSERT(newNumPages >= numPages);
+        
+        numPages = newNumPages;
+        
+        PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+        if (!reservation)
+            CRASH();
+        
+        reservations.append(reservation);
+        
+        return reservation.base();
+    }
+    
+    virtual void notifyNeedPage(void* page)
+    {
+        OSAllocator::commit(page, pageSize(), EXECUTABLE_POOL_WRITABLE, true);
+    }
+    
+    virtual void notifyPageIsFree(void* page)
+    {
+        OSAllocator::decommit(page, pageSize());
+    }
+
+private:
+    Vector<PageReservation, 16> reservations;
+};
+
+static DemandExecutableAllocator* allocator;
+
+void ExecutableAllocator::initializeAllocator()
 {
-    PageAllocation allocation = PageAllocation::allocate(size, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
-    if (!allocation)
-        CRASH();
-    return allocation;
+    ASSERT(!allocator);
+    allocator = new DemandExecutableAllocator();
 }
 
-void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation)
+ExecutableAllocator::ExecutableAllocator(JSGlobalData&)
 {
-    allocation.deallocate();
+    ASSERT(allocator);
 }
 
 bool ExecutableAllocator::isValid() const
 {
     return true;
 }
-    
+
 bool ExecutableAllocator::underMemoryPressure()
 {
     return false;
 }
-    
+
+PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData&, size_t sizeInBytes)
+{
+    RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes);
+    if (!result)
+        CRASH();
+    return result.release();
+}
+
 size_t ExecutableAllocator::committedByteCount()
 {
-    return 0;
-} 
+    return allocator->bytesCommitted();
+}
 
+#if ENABLE(META_ALLOCATOR_PROFILE)
+void ExecutableAllocator::dumpProfile()
+{
+    allocator->dumpProfile();
+}
 #endif
 
+#endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+
 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
 
 #if OS(WINDOWS) || OS(SYMBIAN)
index b0c8de8..d30eaea 100644 (file)
@@ -28,6 +28,7 @@
 #include <stddef.h> // for ptrdiff_t
 #include <limits>
 #include <wtf/Assertions.h>
+#include <wtf/MetaAllocatorHandle.h>
 #include <wtf/PageAllocation.h>
 #include <wtf/PassRefPtr.h>
 #include <wtf/RefCounted.h>
@@ -102,120 +103,27 @@ inline size_t roundUpAllocationSize(size_t request, size_t granularity)
 
 namespace JSC {
 
-class ExecutablePool : public RefCounted<ExecutablePool> {
-public:
-#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
-    typedef PageAllocation Allocation;
-#else
-    class Allocation {
-    public:
-        Allocation(void* base, size_t size)
-            : m_base(base)
-            , m_size(size)
-        {
-        }
-        void* base() { return m_base; }
-        size_t size() { return m_size; }
-        bool operator!() const { return !m_base; }
-
-    private:
-        void* m_base;
-        size_t m_size;
-    };
-#endif
-    typedef Vector<Allocation, 2> AllocationList;
-
-    static PassRefPtr<ExecutablePool> create(JSGlobalData& globalData, size_t n)
-    {
-        return adoptRef(new ExecutablePool(globalData, n));
-    }
-
-    void* alloc(JSGlobalData& globalData, size_t n)
-    {
-        ASSERT(m_freePtr <= m_end);
-
-        // Round 'n' up to a multiple of word size; if all allocations are of
-        // word sized quantities, then all subsequent allocations will be aligned.
-        n = roundUpAllocationSize(n, sizeof(void*));
-
-        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
-            void* result = m_freePtr;
-            m_freePtr += n;
-            return result;
-        }
-
-        // Insufficient space to allocate in the existing pool
-        // so we need allocate into a new pool
-        return poolAllocate(globalData, n);
-    }
-    
-    void tryShrink(void* allocation, size_t oldSize, size_t newSize)
-    {
-        if (static_cast<char*>(allocation) + oldSize != m_freePtr)
-            return;
-        m_freePtr = static_cast<char*>(allocation) + roundUpAllocationSize(newSize, sizeof(void*));
-    }
-
-    ~ExecutablePool()
-    {
-        AllocationList::iterator end = m_pools.end();
-        for (AllocationList::iterator ptr = m_pools.begin(); ptr != end; ++ptr)
-            ExecutablePool::systemRelease(*ptr);
-    }
-
-    size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }
-
-private:
-    static Allocation systemAlloc(size_t n);
-    static void systemRelease(Allocation& alloc);
-
-    ExecutablePool(JSGlobalData&, size_t n);
-
-    void* poolAllocate(JSGlobalData&, size_t n);
-
-    char* m_freePtr;
-    char* m_end;
-    AllocationList m_pools;
-};
+typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle;
 
 class ExecutableAllocator {
     enum ProtectionSetting { Writable, Executable };
 
 public:
-    ExecutableAllocator(JSGlobalData& globalData)
-    {
-        if (isValid())
-            m_smallAllocationPool = ExecutablePool::create(globalData, JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
-#if !ENABLE(INTERPRETER)
-        else
-            CRASH();
-#endif
-    }
+    ExecutableAllocator(JSGlobalData&);
+    
+    static void initializeAllocator();
 
     bool isValid() const;
 
     static bool underMemoryPressure();
+    
+#if ENABLE(META_ALLOCATOR_PROFILE)
+    static void dumpProfile();
+#else
+    static void dumpProfile() { }
+#endif
 
-    PassRefPtr<ExecutablePool> poolForSize(JSGlobalData& globalData, size_t n)
-    {
-        // Try to fit in the existing small allocator
-        ASSERT(m_smallAllocationPool);
-        if (n < m_smallAllocationPool->available())
-            return m_smallAllocationPool;
-
-        // If the request is large, we just provide a unshared allocator
-        if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
-            return ExecutablePool::create(globalData, n);
-
-        // Create a new allocator
-        RefPtr<ExecutablePool> pool = ExecutablePool::create(globalData, JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
-
-        // If the new allocator will result in more free space than in
-        // the current small allocator, then we will use it instead
-        if ((pool->available() - n) > m_smallAllocationPool->available())
-            m_smallAllocationPool = pool;
-        return pool.release();
-    }
+    PassRefPtr<ExecutableMemoryHandle> allocate(JSGlobalData&, size_t sizeInBytes);
 
 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
     static void makeWritable(void* start, size_t size)
@@ -348,50 +256,9 @@ private:
 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
     static void reprotectRegion(void*, size_t, ProtectionSetting);
 #endif
-
-    RefPtr<ExecutablePool> m_smallAllocationPool;
 };
 
-inline ExecutablePool::ExecutablePool(JSGlobalData& globalData, size_t n)
-{
-    size_t allocSize = roundUpAllocationSize(n, pageSize());
-    Allocation mem = systemAlloc(allocSize);
-    if (!mem.base()) {
-        releaseExecutableMemory(globalData);
-        mem = systemAlloc(allocSize);
-    }
-    m_pools.append(mem);
-    m_freePtr = static_cast<char*>(mem.base());
-    if (!m_freePtr)
-        CRASH(); // Failed to allocate
-    m_end = m_freePtr + allocSize;
-    deprecatedTurnOffVerifier();
-}
-
-inline void* ExecutablePool::poolAllocate(JSGlobalData& globalData, size_t n)
-{
-    size_t allocSize = roundUpAllocationSize(n, pageSize());
-    
-    Allocation result = systemAlloc(allocSize);
-    if (!result.base()) {
-        releaseExecutableMemory(globalData);
-        result = systemAlloc(allocSize);
-        if (!result.base())
-            CRASH(); // Failed to allocate
-    }
-    
-    ASSERT(m_end >= m_freePtr);
-    if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
-        // Replace allocation pool
-        m_freePtr = static_cast<char*>(result.base()) + n;
-        m_end = static_cast<char*>(result.base()) + allocSize;
-    }
-
-    m_pools.append(result);
-    return result.base();
-}
-
-}
+} // namespace JSC
 
 #endif // ENABLE(JIT) && ENABLE(ASSEMBLER)
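The header change above replaces ExecutablePool's bump-allocated, shared pools with a single reference-counted handle type, so a chunk of JIT memory lives exactly as long as something holds a reference to it. A minimal usage sketch follows; the allocate() signature and the RefPtr usage are taken from this patch, while start(), codeSize, and assembledBytes are placeholders assumed for illustration only.

    // Sketch only: intended ownership model for executable memory after this change.
    RefPtr<ExecutableMemoryHandle> handle =
        globalData.executableAllocator.allocate(globalData, codeSize);
    memcpy(handle->start(), assembledBytes, codeSize); // start() assumed for illustration
    // ... run the generated code ...
    handle.clear(); // dropping the last reference frees just this chunk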
 
index 0301845..a094c8b 100644
 
 #include <errno.h>
 
-#include "TCSpinLock.h"
 #include <sys/mman.h>
 #include <unistd.h>
-#include <wtf/AVLTree.h>
+#include <wtf/MetaAllocator.h>
 #include <wtf/PageReservation.h>
 #include <wtf/VMTags.h>
 
@@ -46,481 +45,100 @@ using namespace WTF;
 
 namespace JSC {
     
-#define TwoPow(n) (1ull << n)
-
-class AllocationTableSizeClass {
-public:
-    AllocationTableSizeClass(size_t size, size_t blockSize, unsigned log2BlockSize)
-        : m_blockSize(blockSize)
-    {
-        ASSERT(blockSize == TwoPow(log2BlockSize));
-
-        // Calculate the number of blocks needed to hold size.
-        size_t blockMask = blockSize - 1;
-        m_blockCount = (size + blockMask) >> log2BlockSize;
-
-        // Align to the smallest power of two >= m_blockCount.
-        m_blockAlignment = 1;
-        while (m_blockAlignment < m_blockCount)
-            m_blockAlignment += m_blockAlignment;
-    }
-
-    size_t blockSize() const { return m_blockSize; }
-    size_t blockCount() const { return m_blockCount; }
-    size_t blockAlignment() const { return m_blockAlignment; }
-
-    size_t size()
-    {
-        return m_blockSize * m_blockCount;
-    }
-
-private:
-    size_t m_blockSize;
-    size_t m_blockCount;
-    size_t m_blockAlignment;
-};
-
-template<unsigned log2Entries>
-class AllocationTableLeaf {
-    typedef uint64_t BitField;
-
-public:
-    static const unsigned log2SubregionSize = 12; // 2^12 == pagesize
-    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;
-
-    static const size_t subregionSize = TwoPow(log2SubregionSize);
-    static const size_t regionSize = TwoPow(log2RegionSize);
-    static const unsigned entries = TwoPow(log2Entries);
-    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableLeaf_entries_fit_in_BitField);
-
-    AllocationTableLeaf()
-        : m_allocated(0)
-    {
-    }
-
-    ~AllocationTableLeaf()
-    {
-        ASSERT(isEmpty());
-    }
-
-    size_t allocate(AllocationTableSizeClass& sizeClass)
-    {
-        ASSERT(sizeClass.blockSize() == subregionSize);
-        ASSERT(!isFull());
-
-        size_t alignment = sizeClass.blockAlignment();
-        size_t count = sizeClass.blockCount();
-        // Use this mask to check for spans of free blocks.
-        BitField mask = ((1ull << count) - 1) << (alignment - count);
-
-        // Step in units of alignment size.
-        for (unsigned i = 0; i < entries; i += alignment) {
-            if (!(m_allocated & mask)) {
-                m_allocated |= mask;
-                return (i + (alignment - count)) << log2SubregionSize;
-            }
-            mask <<= alignment;
-        }
-        return notFound;
-    }
-
-    void free(size_t location, AllocationTableSizeClass& sizeClass)
-    {
-        ASSERT(sizeClass.blockSize() == subregionSize);
-
-        size_t entry = location >> log2SubregionSize;
-        size_t count = sizeClass.blockCount();
-        BitField mask = ((1ull << count) - 1) << entry;
-
-        ASSERT((m_allocated & mask) == mask);
-        m_allocated &= ~mask;
-    }
-
-    bool isEmpty()
-    {
-        return !m_allocated;
-    }
-
-    bool isFull()
-    {
-        return !~m_allocated;
-    }
-
-    static size_t size()
-    {
-        return regionSize;
-    }
-
-    static AllocationTableSizeClass classForSize(size_t size)
-    {
-        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
-    }
-
-#ifndef NDEBUG
-    void dump(size_t parentOffset = 0, unsigned indent = 0)
-    {
-        for (unsigned i = 0; i < indent; ++i)
-            fprintf(stderr, "    ");
-        fprintf(stderr, "%08x: [%016llx]\n", (int)parentOffset, m_allocated);
-    }
-#endif
-
-private:
-    BitField m_allocated;
-};
-
-
-template<class NextLevel>
-class LazyAllocationTable {
-public:
-    static const unsigned log2RegionSize = NextLevel::log2RegionSize;
-    static const unsigned entries = NextLevel::entries;
-
-    LazyAllocationTable()
-        : m_ptr(0)
-    {
-    }
-
-    ~LazyAllocationTable()
-    {
-        ASSERT(isEmpty());
-    }
-
-    size_t allocate(AllocationTableSizeClass& sizeClass)
-    {
-        if (!m_ptr)
-            m_ptr = new NextLevel();
-        return m_ptr->allocate(sizeClass);
-    }
-
-    void free(size_t location, AllocationTableSizeClass& sizeClass)
-    {
-        ASSERT(m_ptr);
-        m_ptr->free(location, sizeClass);
-        if (m_ptr->isEmpty()) {
-            delete m_ptr;
-            m_ptr = 0;
-        }
-    }
-
-    bool isEmpty()
-    {
-        return !m_ptr;
-    }
-
-    bool isFull()
-    {
-        return m_ptr && m_ptr->isFull();
-    }
-
-    static size_t size()
-    {
-        return NextLevel::size();
-    }
-
-#ifndef NDEBUG
-    void dump(size_t parentOffset = 0, unsigned indent = 0)
-    {
-        ASSERT(m_ptr);
-        m_ptr->dump(parentOffset, indent);
-    }
-#endif
-
-    static AllocationTableSizeClass classForSize(size_t size)
-    {
-        return NextLevel::classForSize(size);
-    }
-
-private:
-    NextLevel* m_ptr;
-};
-
-template<class NextLevel, unsigned log2Entries>
-class AllocationTableDirectory {
-    typedef uint64_t BitField;
-
-public:
-    static const unsigned log2SubregionSize = NextLevel::log2RegionSize;
-    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;
-
-    static const size_t subregionSize = TwoPow(log2SubregionSize);
-    static const size_t regionSize = TwoPow(log2RegionSize);
-    static const unsigned entries = TwoPow(log2Entries);
-    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableDirectory_entries_fit_in_BitField);
-
-    AllocationTableDirectory()
-        : m_full(0)
-        , m_hasSuballocation(0)
-    {
-    }
-
-    ~AllocationTableDirectory()
-    {
-        ASSERT(isEmpty());
-    }
-
-    size_t allocate(AllocationTableSizeClass& sizeClass)
-    {
-        ASSERT(sizeClass.blockSize() <= subregionSize);
-        ASSERT(!isFull());
-
-        if (sizeClass.blockSize() < subregionSize) {
-            BitField bit = 1;
-            for (unsigned i = 0; i < entries; ++i, bit += bit) {
-                if (m_full & bit)
-                    continue;
-                size_t location = m_suballocations[i].allocate(sizeClass);
-                if (location != notFound) {
-                    // If this didn't already have a subregion, it does now!
-                    m_hasSuballocation |= bit;
-                    // Mirror the suballocation's full bit.
-                    if (m_suballocations[i].isFull())
-                        m_full |= bit;
-                    return (i * subregionSize) | location;
-                }
-            }
-            return notFound;
-        }
-
-        // A block is allocated if either it is fully allocated or contains suballocations.
-        BitField allocated = m_full | m_hasSuballocation;
-
-        size_t alignment = sizeClass.blockAlignment();
-        size_t count = sizeClass.blockCount();
-        // Use this mask to check for spans of free blocks.
-        BitField mask = ((1ull << count) - 1) << (alignment - count);
-
-        // Step in units of alignment size.
-        for (unsigned i = 0; i < entries; i += alignment) {
-            if (!(allocated & mask)) {
-                m_full |= mask;
-                return (i + (alignment - count)) << log2SubregionSize;
-            }
-            mask <<= alignment;
-        }
-        return notFound;
-    }
-
-    void free(size_t location, AllocationTableSizeClass& sizeClass)
-    {
-        ASSERT(sizeClass.blockSize() <= subregionSize);
-
-        size_t entry = location >> log2SubregionSize;
-
-        if (sizeClass.blockSize() < subregionSize) {
-            BitField bit = 1ull << entry;
-            m_suballocations[entry].free(location & (subregionSize - 1), sizeClass);
-            // Check if the suballocation is now empty.
-            if (m_suballocations[entry].isEmpty())
-                m_hasSuballocation &= ~bit;
-            // No need to check, it clearly isn't full any more!
-            m_full &= ~bit;
-        } else {
-            size_t count = sizeClass.blockCount();
-            BitField mask = ((1ull << count) - 1) << entry;
-            ASSERT((m_full & mask) == mask);
-            ASSERT(!(m_hasSuballocation & mask));
-            m_full &= ~mask;
-        }
-    }
-
-    bool isEmpty()
-    {
-        return !(m_full | m_hasSuballocation);
-    }
-
-    bool isFull()
-    {   
-        return !~m_full;
-    }
-
-    static size_t size()
-    {
-        return regionSize;
-    }
-
-    static AllocationTableSizeClass classForSize(size_t size)
-    {
-        if (size < subregionSize) {
-            AllocationTableSizeClass sizeClass = NextLevel::classForSize(size);
-            if (sizeClass.size() < NextLevel::size())
-                return sizeClass;
-        }
-        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
-    }
-
-#ifndef NDEBUG
-    void dump(size_t parentOffset = 0, unsigned indent = 0)
-    {
-        for (unsigned i = 0; i < indent; ++i)
-            fprintf(stderr, "    ");
-        fprintf(stderr, "%08x: [", (int)parentOffset);
-        for (unsigned i = 0; i < entries; ++i) {
-            BitField bit = 1ull << i;
-            char c = m_hasSuballocation & bit
-                ? (m_full & bit ? 'N' : 'n')
-                : (m_full & bit ? 'F' : '-');
-            fprintf(stderr, "%c", c);
-        }
-        fprintf(stderr, "]\n");
-
-        for (unsigned i = 0; i < entries; ++i) {
-            BitField bit = 1ull << i;
-            size_t offset = parentOffset | (subregionSize * i);
-            if (m_hasSuballocation & bit)
-                m_suballocations[i].dump(offset, indent + 1);
-        }
-    }
-#endif
-
-private:
-    NextLevel m_suballocations[entries];
-    // Subregions exist in one of four states:
-    // (1) empty (both bits clear)
-    // (2) fully allocated as a single allocation (m_full set)
-    // (3) partially allocated through suballocations (m_hasSuballocation set)
-    // (4) fully allocated through suballocations (both bits set)
-    BitField m_full;
-    BitField m_hasSuballocation;
-};
-
-
-typedef AllocationTableLeaf<6> PageTables256KB;
-typedef AllocationTableDirectory<PageTables256KB, 6> PageTables16MB;
-typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 1> PageTables32MB;
-typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 6> PageTables1GB;
-
 #if CPU(ARM)
-typedef PageTables16MB FixedVMPoolPageTables;
+static const size_t fixedPoolSize = 16 * 1024 * 1024;
 #elif CPU(X86_64)
-typedef PageTables1GB FixedVMPoolPageTables;
+static const size_t fixedPoolSize = 1024 * 1024 * 1024;
 #else
-typedef PageTables32MB FixedVMPoolPageTables;
+static const size_t fixedPoolSize = 32 * 1024 * 1024;
 #endif
 
-
-class FixedVMPoolAllocator
-{
+class FixedVMPoolExecutableAllocator: public MetaAllocator {
 public:
-    FixedVMPoolAllocator()
+    FixedVMPoolExecutableAllocator()
+        : MetaAllocator(32) // round up all allocations to 32 bytes
     {
-        ASSERT(PageTables256KB::size() == 256 * 1024);
-        ASSERT(PageTables16MB::size() == 16 * 1024 * 1024);
-        ASSERT(PageTables32MB::size() == 32 * 1024 * 1024);
-        ASSERT(PageTables1GB::size() == 1024 * 1024 * 1024);
-
-        m_reservation = PageReservation::reserveWithGuardPages(FixedVMPoolPageTables::size(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+        m_reservation = PageReservation::reserveWithGuardPages(fixedPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
 #if !ENABLE(INTERPRETER)
-        if (!isValid())
+        if (!m_reservation)
             CRASH();
 #endif
+        if (m_reservation) {
+            ASSERT(m_reservation.size() == fixedPoolSize);
+            addFreshFreeSpace(m_reservation.base(), m_reservation.size());
+        }
     }
-    ExecutablePool::Allocation alloc(size_t requestedSize)
-    {
-        ASSERT(requestedSize);
-        AllocationTableSizeClass sizeClass = classForSize(requestedSize);
-        size_t size = sizeClass.size();
-        ASSERT(size);
-
-        if (size >= FixedVMPoolPageTables::size())
-            return ExecutablePool::Allocation(0, 0);
-        if (m_pages.isFull())
-            return ExecutablePool::Allocation(0, 0);
-
-        size_t offset = m_pages.allocate(sizeClass);
-        if (offset == notFound)
-            return ExecutablePool::Allocation(0, 0);
-
-        void* pointer = offsetToPointer(offset);
-        m_reservation.commit(pointer, size);
-        return ExecutablePool::Allocation(pointer, size);
-    }
-
-    void free(ExecutablePool::Allocation allocation)
+    
+protected:
+    virtual void* allocateNewSpace(size_t&)
     {
-        void* pointer = allocation.base();
-        size_t size = allocation.size();
-        ASSERT(size);
-
-        m_reservation.decommit(pointer, size);
-
-        AllocationTableSizeClass sizeClass = classForSize(size);
-        ASSERT(sizeClass.size() == size);
-        m_pages.free(pointerToOffset(pointer), sizeClass);
+        // We're operating in a fixed pool, so new allocation is always prohibited.
+        return 0;
     }
-
-    size_t allocated()
+    
+    virtual void notifyNeedPage(void* page)
     {
-        return m_reservation.committed();
+        m_reservation.commit(page, pageSize());
     }
-
-    bool isValid() const
+    
+    virtual void notifyPageIsFree(void* page)
     {
-        return !!m_reservation;
+        m_reservation.decommit(page, pageSize());
     }
 
 private:
-    AllocationTableSizeClass classForSize(size_t size)
-    {
-        return FixedVMPoolPageTables::classForSize(size);
-    }
-
-    void* offsetToPointer(size_t offset)
-    {
-        return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(m_reservation.base()) + offset);
-    }
-
-    size_t pointerToOffset(void* pointer)
-    {
-        return reinterpret_cast<intptr_t>(pointer) - reinterpret_cast<intptr_t>(m_reservation.base());
-    }
-
     PageReservation m_reservation;
-    FixedVMPoolPageTables m_pages;
 };
 
+static FixedVMPoolExecutableAllocator* allocator;
 
-static SpinLock spinlock = SPINLOCK_INITIALIZER;
-static FixedVMPoolAllocator* allocator = 0;
-
+void ExecutableAllocator::initializeAllocator()
+{
+    ASSERT(!allocator);
+    allocator = new FixedVMPoolExecutableAllocator();
+}
 
-size_t ExecutableAllocator::committedByteCount()
+ExecutableAllocator::ExecutableAllocator(JSGlobalData&)
 {
-    SpinLockHolder lockHolder(&spinlock);
-    return allocator ? allocator->allocated() : 0;
-}   
+    ASSERT(allocator);
+}
 
 bool ExecutableAllocator::isValid() const
 {
-    SpinLockHolder lock_holder(&spinlock);
-    if (!allocator)
-        allocator = new FixedVMPoolAllocator();
-    return allocator->isValid();
+    return !!allocator->bytesReserved();
 }
 
 bool ExecutableAllocator::underMemoryPressure()
 {
-    // Technically we should take the spin lock here, but we don't care if we get stale data.
-    // This is only really a heuristic anyway.
-    return allocator && (allocator->allocated() > (FixedVMPoolPageTables::size() / 2));
+    MetaAllocator::Statistics statistics = allocator->currentStatistics();
+    return statistics.bytesAllocated > statistics.bytesReserved / 2;
 }
 
-ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
+PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData& globalData, size_t sizeInBytes)
 {
-    SpinLockHolder lock_holder(&spinlock);
-    ASSERT(allocator);
-    return allocator->alloc(size);
+    RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes);
+    if (!result) {
+        releaseExecutableMemory(globalData);
+        result = allocator->allocate(sizeInBytes);
+        if (!result)
+            CRASH();
+    }
+    return result.release();
 }
 
-void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation) 
+size_t ExecutableAllocator::committedByteCount()
 {
-    SpinLockHolder lock_holder(&spinlock);
-    ASSERT(allocator);
-    allocator->free(allocation);
+    return allocator->bytesCommitted();
 }
 
+#if ENABLE(META_ALLOCATOR_PROFILE)
+void ExecutableAllocator::dumpProfile()
+{
+    allocator->dumpProfile();
+}
+#endif
+
 }
 
 
-#endif // HAVE(ASSEMBLER)
+#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
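The fixed pool above delegates all bookkeeping to WTF::MetaAllocator, the new best-fit, balanced-tree allocator; the tree itself lives in WTF and is not part of this file. As a rough, self-contained illustration of the best-fit policy only (not the MetaAllocator implementation, which uses an intrusive red-black tree, coalesces freed neighbours, and drives the notifyNeedPage/notifyPageIsFree callbacks shown above), consider:

    // Illustration only: best-fit selection over a fixed region.
    #include <map>
    #include <utility>
    #include <stddef.h>
    #include <stdint.h>

    class BestFitPool {
    public:
        BestFitPool(uintptr_t base, size_t size) { m_free.insert(std::make_pair(size, base)); }

        // Returns 0 on failure, otherwise the start of a block of 'size' bytes.
        uintptr_t allocate(size_t size)
        {
            // Best fit: the smallest free block that is still big enough.
            std::multimap<size_t, uintptr_t>::iterator it = m_free.lower_bound(size);
            if (it == m_free.end())
                return 0;
            size_t blockSize = it->first;
            uintptr_t blockBase = it->second;
            m_free.erase(it);
            if (blockSize > size) // give the unused tail back to the free list
                m_free.insert(std::make_pair(blockSize - size, blockBase + size));
            return blockBase;
        }

        // The real allocator also merges a freed block with free neighbours,
        // which is what keeps large requests satisfiable after many frees.
        void free(uintptr_t base, size_t size) { m_free.insert(std::make_pair(size, base)); }

    private:
        std::multimap<size_t, uintptr_t> m_free; // keyed by block size
    };

If an allocation fails, ExecutableAllocator::allocate() above calls releaseExecutableMemory() and retries once before crashing.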
index df76476..f881c3b 100644
@@ -570,7 +570,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
 
     ASSERT(m_jmpTable.isEmpty());
 
-    LinkBuffer patchBuffer(*m_globalData, this, m_globalData->executableAllocator);
+    LinkBuffer patchBuffer(*m_globalData, this);
 
     // Translate vPC offsets into addresses in JIT generated code, for switch tables.
     for (unsigned i = 0; i < m_switches.size(); ++i) {
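From here on, every LinkBuffer is constructed without an explicit pool; as the later hunks show, the buffer now obtains its own chunk from the global executable allocator and finalizeCode() hands back a CodeRef that owns it. A hedged sketch of the resulting pattern (someCall and cti_example_stub are placeholders, not symbols from this patch):

    LinkBuffer patchBuffer(*m_globalData, this);                // allocation now happens here
    patchBuffer.link(someCall, FunctionPtr(cti_example_stub));  // placeholder call and target
    CodeRef code = patchBuffer.finalizeCode();                  // the CodeRef owns the memory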
index efd335e..c861576 100644
@@ -221,20 +221,20 @@ namespace JSC {
             jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct);
         }
 
-        static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, TrampolineStructure *trampolines)
+        static PassRefPtr<ExecutableMemoryHandle> compileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure *trampolines)
         {
             if (!globalData->canUseJIT())
-                return;
+                return 0;
             JIT jit(globalData, 0);
-            jit.privateCompileCTIMachineTrampolines(executablePool, globalData, trampolines);
+            return jit.privateCompileCTIMachineTrampolines(globalData, trampolines);
         }
 
-        static CodePtr compileCTINativeCall(JSGlobalData* globalData, PassRefPtr<ExecutablePool> executablePool, NativeFunction func)
+        static CodeRef compileCTINativeCall(JSGlobalData* globalData, NativeFunction func)
         {
             if (!globalData->canUseJIT())
-                return CodePtr();
+                return CodeRef();
             JIT jit(globalData, 0);
-            return jit.privateCompileCTINativeCall(executablePool, globalData, func);
+            return jit.privateCompileCTINativeCall(globalData, func);
         }
 
         static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
@@ -274,9 +274,9 @@ namespace JSC {
         void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
         void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress, bool direct);
 
-        void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, TrampolineStructure *trampolines);
+        PassRefPtr<ExecutableMemoryHandle> privateCompileCTIMachineTrampolines(JSGlobalData*, TrampolineStructure*);
         Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false);
-        CodePtr privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* data, NativeFunction func);
+        CodeRef privateCompileCTINativeCall(JSGlobalData*, NativeFunction);
         void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
 
         void addSlowCase(Jump);
@@ -1050,7 +1050,7 @@ namespace JSC {
 #endif
 #endif
         WeakRandom m_randomGenerator;
-        static CodePtr stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool);
+        static CodeRef stringGetByValStubGenerator(JSGlobalData*);
         
 #if ENABLE(TIERED_COMPILATION)
         bool m_canBeOptimized;
index 56dda3f..85cc0e7 100644
@@ -83,12 +83,12 @@ namespace JSC {
         
         bool operator !() const
         {
-            return !m_ref.m_code.executableAddress();
+            return !m_ref;
         }
 
         CodePtr addressForCall()
         {
-            return m_ref.m_code;
+            return m_ref.code();
         }
 
         // This function returns the offset in bytes of 'pointerIntoCode' into
@@ -96,7 +96,7 @@ namespace JSC {
         // block of code.  It is ASSERTed that no codeblock >4gb in size.
         unsigned offsetOf(void* pointerIntoCode)
         {
-            intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.m_code.executableAddress());
+            intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.code().executableAddress());
             ASSERT(static_cast<intptr_t>(static_cast<unsigned>(result)) == result);
             return static_cast<unsigned>(result);
         }
@@ -104,24 +104,24 @@ namespace JSC {
         // Execute the code!
         inline JSValue execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData)
         {
-            JSValue result = JSValue::decode(ctiTrampoline(m_ref.m_code.executableAddress(), registerFile, callFrame, 0, Profiler::enabledProfilerReference(), globalData));
+            JSValue result = JSValue::decode(ctiTrampoline(m_ref.code().executableAddress(), registerFile, callFrame, 0, Profiler::enabledProfilerReference(), globalData));
             return globalData->exception ? jsNull() : result;
         }
 
         void* start()
         {
-            return m_ref.m_code.dataLocation();
+            return m_ref.code().dataLocation();
         }
 
         size_t size()
         {
-            ASSERT(m_ref.m_code.executableAddress());
-            return m_ref.m_size;
+            ASSERT(m_ref.code().executableAddress());
+            return m_ref.size();
         }
 
-        ExecutablePool* getExecutablePool()
+        ExecutableMemoryHandle* getExecutableMemory()
         {
-            return m_ref.m_executablePool.get();
+            return m_ref.executableMemory();
         }
         
         JITType jitType()
@@ -131,9 +131,9 @@ namespace JSC {
 
         // Host functions are a bit special; they have a m_code pointer but they
         // do not individully ref the executable pool containing the trampoline.
-        static JITCode HostFunction(CodePtr code)
+        static JITCode HostFunction(CodeRef code)
         {
-            return JITCode(code.dataLocation(), 0, 0, HostCallThunk);
+            return JITCode(code, HostCallThunk);
         }
 
         void clear()
@@ -143,8 +143,8 @@ namespace JSC {
         }
 
     private:
-        JITCode(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size, JITType jitType)
-            : m_ref(code, executablePool, size)
+        JITCode(PassRefPtr<ExecutableMemoryHandle> executableMemory, JITType jitType)
+            : m_ref(executableMemory)
             , m_jitType(jitType)
         {
         }
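JITCode now wraps a MacroAssemblerCodeRef directly, so its entry point, size, and backing memory all come from the ref, and the code's lifetime follows the reference count. A small sketch using only calls visible in this hunk (codeRef is a placeholder for a ref produced elsewhere, for example by LinkBuffer::finalizeCode()):

    JITCode thunk = JITCode::HostFunction(codeRef);            // host thunks carry the whole ref
    void* entry = thunk.addressForCall().executableAddress();  // same memory the ref keeps alive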
index 0b1c34a..8f31999 100644
@@ -42,7 +42,7 @@ namespace JSC {
 
 #if USE(JSVALUE64)
 
-void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
+PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure *trampolines)
 {
     // (2) The second function provides fast property access for string length
     Label stringLengthBegin = align();
@@ -153,7 +153,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
 
     // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
-    LinkBuffer patchBuffer(*m_globalData, this, m_globalData->executableAllocator);
+    LinkBuffer patchBuffer(*m_globalData, this);
 
     patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
     patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
@@ -164,7 +164,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));
 
     CodeRef finalCode = patchBuffer.finalizeCode();
-    *executablePool = finalCode.m_executablePool;
+    RefPtr<ExecutableMemoryHandle> executableMemory = finalCode.executableMemory();
 
     trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
     trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
@@ -173,6 +173,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
     trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
     trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
+    
+    return executableMemory.release();
 }
 
 JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
@@ -291,9 +293,9 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
     return nativeCallThunk;
 }
 
-JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool>, JSGlobalData* globalData, NativeFunction)
+JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFunction)
 {
-    return globalData->jitStubs->ctiNativeCall();
+    return CodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
 }
 
 void JIT::emit_op_mov(Instruction* currentInstruction)
index 61ddf5c..a421ff1 100644
@@ -40,7 +40,7 @@
 
 namespace JSC {
 
-void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
+PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure *trampolines)
 {
 #if ENABLE(JIT_USE_SOFT_MODULO)
     Label softModBegin = align();
@@ -152,7 +152,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
 
     // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
-    LinkBuffer patchBuffer(*m_globalData, this, m_globalData->executableAllocator);
+    LinkBuffer patchBuffer(*m_globalData, this);
 
     patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
     patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
@@ -163,7 +163,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     patchBuffer.link(callCompileCconstruct, FunctionPtr(cti_op_construct_jitCompile));
 
     CodeRef finalCode = patchBuffer.finalizeCode();
-    *executablePool = finalCode.m_executablePool;
+    RefPtr<ExecutableMemoryHandle> executableMemory = finalCode.executableMemory();
 
     trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
     trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
@@ -175,6 +175,8 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
 #if ENABLE(JIT_USE_SOFT_MODULO)
     trampolines->ctiSoftModulo = patchBuffer.trampolineAt(softModBegin);
 #endif
+    
+    return executableMemory.release();
 }
 
 JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
@@ -312,10 +314,9 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
     return nativeCallThunk;
 }
 
-JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* globalData, NativeFunction func)
+JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFunction func)
 {
     Call nativeCall;
-    Label nativeCallThunk = align();
 
     emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
 
@@ -447,12 +448,10 @@ JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executa
     ret();
 
     // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
-    LinkBuffer patchBuffer(*m_globalData, this, executablePool);
+    LinkBuffer patchBuffer(*m_globalData, this);
 
     patchBuffer.link(nativeCall, FunctionPtr(func));
-    patchBuffer.finalizeCode();
-
-    return patchBuffer.trampolineAt(nativeCallThunk);
+    return patchBuffer.finalizeCode();
 }
 
 void JIT::emit_op_mov(Instruction* currentInstruction)
index 62cf92e..3b33837 100644
@@ -50,7 +50,7 @@ using namespace std;
 namespace JSC {
 #if USE(JSVALUE64)
 
-JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
 {
     JSInterfaceJIT jit;
     JumpList failures;
@@ -77,8 +77,8 @@ JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, Executab
     jit.move(TrustedImm32(0), regT0);
     jit.ret();
     
-    LinkBuffer patchBuffer(*globalData, &jit, pool);
-    return patchBuffer.finalizeCode().m_code;
+    LinkBuffer patchBuffer(*globalData, &jit);
+    return patchBuffer.finalizeCode();
 }
 
 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
@@ -122,7 +122,7 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
     Jump nonCell = jump();
     linkSlowCase(iter); // base array check
     Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
-    emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
+    emitNakedCall(CodeLocationLabel(m_globalData->getCTIStub(stringGetByValStubGenerator).code()));
     Jump failed = branchTestPtr(Zero, regT0);
     emitPutVirtualRegister(dst, regT0);
     emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
@@ -555,7 +555,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
     restoreArgumentReferenceForTrampoline();
     Call failureCall = tailRecursiveCall();
 
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
 
     patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
 
@@ -564,10 +564,9 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
         patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
     }
     
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
+    stubInfo->stubRoutine = patchBuffer.finalizeCode();
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+    repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code()));
 }
 
 void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
@@ -615,7 +614,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
     emitFastArithIntToImmNoCheck(regT2, regT0);
     Jump success = jump();
 
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
 
     // Use the patch information to link the failure cases back to the original slow case routine.
     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -626,13 +625,12 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
 
     // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
+    stubInfo->stubRoutine = patchBuffer.finalizeCode();
 
     // Finally patch the jump to slow case back in the hot path to jump here instead.
     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
 
     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
@@ -673,7 +671,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
     } else
         compileGetDirectOffset(protoObject, regT0, cachedOffset);
     Jump success = jump();
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
 
     // Use the patch information to link the failure cases back to the original slow case routine.
     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -690,13 +688,12 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
         }
     }
     // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
+    stubInfo->stubRoutine = patchBuffer.finalizeCode();
 
     // Finally patch the jump to slow case back in the hot path to jump here instead.
     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
 
     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
@@ -726,7 +723,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
         compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
     Jump success = jump();
 
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
 
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -736,7 +733,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
     }
 
     // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+    CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code());
     if (!lastProtoBegin)
         lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
 
@@ -745,14 +742,14 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
 
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+    MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode();
 
-    polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), entryLabel, structure);
+    polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure);
 
     // Finally patch the jump to slow case back in the hot path to jump here instead.
     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
 }
 
 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
@@ -791,7 +788,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
 
     Jump success = jump();
 
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
 
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -801,20 +798,20 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
     }
 
     // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+    CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
     patchBuffer.link(failureCases1, lastProtoBegin);
     patchBuffer.link(failureCases2, lastProtoBegin);
 
     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
 
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), entryLabel, structure, prototypeStructure);
+    MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode();
+    prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure);
 
     // Finally patch the jump to slow case back in the hot path to jump here instead.
     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
 }
 
 void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
@@ -857,7 +854,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
         compileGetDirectOffset(protoObject, regT0, cachedOffset);
     Jump success = jump();
 
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
     
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -867,22 +864,22 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
     }
 
     // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+    CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
 
     patchBuffer.link(bucketsOfFail, lastProtoBegin);
 
     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
 
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+    CodeRef stubRoutine = patchBuffer.finalizeCode();
 
     // Track the stub we have created so that it will be deleted later.
-    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, chain);
+    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain);
 
     // Finally patch the jump to slow case back in the hot path to jump here instead.
     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
 }
 
 void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
@@ -925,7 +922,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
         compileGetDirectOffset(protoObject, regT0, cachedOffset);
     Jump success = jump();
 
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
 
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -941,13 +938,13 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
 
     // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
+    CodeRef stubRoutine = patchBuffer.finalizeCode();
+    stubInfo->stubRoutine = stubRoutine;
 
     // Finally patch the jump to slow case back in the hot path to jump here instead.
     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
 
     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
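In each of the stubs above, stubInfo->stubRoutine changes from a bare CodeLocationLabel into a MacroAssemblerCodeRef, so every inline-cache stub now owns its executable memory individually instead of parking it in the CodeBlock's pool. A hedged teardown sketch (the exact point at which stubs are discarded lies outside this hunk):

    // Discarding a stub releases just its chunk: a default-constructed CodeRef
    // owns nothing, so assigning it drops this stub's reference to the old handle.
    stubInfo->stubRoutine = MacroAssemblerCodeRef();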
index 8c45295..695d0ed 100644
@@ -165,7 +165,7 @@ void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowC
     m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
 }
 
-JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
 {
     JSInterfaceJIT jit;
     JumpList failures;
@@ -193,8 +193,8 @@ JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, Executab
     jit.move(TrustedImm32(0), regT0);
     jit.ret();
     
-    LinkBuffer patchBuffer(*globalData, &jit, pool);
-    return patchBuffer.finalizeCode().m_code;
+    LinkBuffer patchBuffer(*globalData, &jit);
+    return patchBuffer.finalizeCode();
 }
 
 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
@@ -232,7 +232,7 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
     Jump nonCell = jump();
     linkSlowCase(iter); // base array check
     Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
-    emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
+    emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator).code());
     Jump failed = branchTestPtr(Zero, regT0);
     emitStore(dst, regT1, regT0);
     emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
@@ -541,7 +541,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
     restoreArgumentReferenceForTrampoline();
     Call failureCall = tailRecursiveCall();
     
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
     
     patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
     
@@ -550,10 +550,9 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
         patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
     }
     
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
+    stubInfo->stubRoutine = patchBuffer.finalizeCode();
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+    repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code()));
 }
 
 void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
@@ -606,7 +605,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
     move(TrustedImm32(JSValue::Int32Tag), regT1);
     Jump success = jump();
     
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
     
     // Use the patch information to link the failure cases back to the original slow case routine.
     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -617,13 +616,12 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
     
     // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
+    stubInfo->stubRoutine = patchBuffer.finalizeCode();
     
     // Finally patch the jump to slow case back in the hot path to jump here instead.
     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
     
     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
@@ -666,7 +664,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
     
     Jump success = jump();
     
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
     
     // Use the patch information to link the failure cases back to the original slow case routine.
     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -684,13 +682,12 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
     }
 
     // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
+    stubInfo->stubRoutine = patchBuffer.finalizeCode();
     
     // Finally patch the jump to slow case back in the hot path to jump here instead.
     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
     
     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
@@ -723,7 +720,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
 
     Jump success = jump();
     
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
             if (iter->to)
@@ -731,7 +728,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
         }
     }    
     // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+    CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code());
     if (!lastProtoBegin)
         lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
     
@@ -740,14 +737,14 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
 
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+    CodeRef stubRoutine = patchBuffer.finalizeCode();
 
-    polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), entryLabel, structure);
+    polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubRoutine, structure);
     
     // Finally patch the jump to slow case back in the hot path to jump here instead.
     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
 }
 
 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
@@ -787,7 +784,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
     
     Jump success = jump();
     
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
             if (iter->to)
@@ -795,21 +792,21 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
         }
     }
     // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+    CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
     patchBuffer.link(failureCases1, lastProtoBegin);
     patchBuffer.link(failureCases2, lastProtoBegin);
     
     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
     
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+    CodeRef stubRoutine = patchBuffer.finalizeCode();
 
-    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, prototypeStructure);
+    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, prototypeStructure);
     
     // Finally patch the jump to slow case back in the hot path to jump here instead.
     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
 }
 
 void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
@@ -854,7 +851,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
 
     Jump success = jump();
     
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
             if (iter->to)
@@ -862,22 +859,22 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
         }
     }
     // Use the patch information to link the failure cases back to the original slow case routine.
-    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+    CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
     
     patchBuffer.link(bucketsOfFail, lastProtoBegin);
     
     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
     
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+    CodeRef stubRoutine = patchBuffer.finalizeCode();
     
     // Track the stub we have created so that it will be deleted later.
-    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, chain);
+    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain);
     
     // Finally patch the jump to slow case back in the hot path to jump here instead.
     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
 }
 
 void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
@@ -921,7 +918,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
         compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
     Jump success = jump();
     
-    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock->executablePool());
+    LinkBuffer patchBuffer(*m_globalData, this);
     if (needsStubLink) {
         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
             if (iter->to)
@@ -935,13 +932,13 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
     
     // Track the stub we have created so that it will be deleted later.
-    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-    stubInfo->stubRoutine = entryLabel;
+    CodeRef stubRoutine = patchBuffer.finalizeCode();
+    stubInfo->stubRoutine = stubRoutine;
     
     // Finally patch the jump to slow case back in the hot path to jump here instead.
     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
     RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(jumpLocation, entryLabel);
+    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
     
     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
index c12d06c..b4addb1 100644
@@ -757,8 +757,8 @@ JITThunks::JITThunks(JSGlobalData* globalData)
     if (!globalData->executableAllocator.isValid())
         return;
 
-    JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_trampolineStructure);
-    ASSERT(m_executablePool);
+    m_executableMemory = JIT::compileCTIMachineTrampolines(globalData, &m_trampolineStructure);
+    ASSERT(!!m_executableMemory);
 #if CPU(ARM_THUMB2)
     // Unfortunately, the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
     // and the OBJECT_OFFSETOF macro does not appear constantish enough for it to be happy with its use in COMPILE_ASSERT
@@ -1608,7 +1608,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
 
         if (stubInfo->accessType == access_get_by_id_self) {
             ASSERT(!stubInfo->stubRoutine);
-            polymorphicStructureList = new PolymorphicAccessStructureList(callFrame->globalData(), codeBlock->ownerExecutable(), CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure.get());
+            polymorphicStructureList = new PolymorphicAccessStructureList(callFrame->globalData(), codeBlock->ownerExecutable(), MacroAssemblerCodeRef(), stubInfo->u.getByIdSelf.baseObjectStructure.get());
             stubInfo->initGetByIdSelfList(polymorphicStructureList, 1);
         } else {
             polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList;
@@ -1634,12 +1634,12 @@ static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(JSG
     switch (stubInfo->accessType) {
     case access_get_by_id_proto:
         prototypeStructureList = new PolymorphicAccessStructureList(globalData, owner, stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure.get(), stubInfo->u.getByIdProto.prototypeStructure.get());
-        stubInfo->stubRoutine = CodeLocationLabel();
+        stubInfo->stubRoutine = MacroAssemblerCodeRef();
         stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
         break;
     case access_get_by_id_chain:
         prototypeStructureList = new PolymorphicAccessStructureList(globalData, owner, stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure.get(), stubInfo->u.getByIdChain.chain.get());
-        stubInfo->stubRoutine = CodeLocationLabel();
+        stubInfo->stubRoutine = MacroAssemblerCodeRef();
         stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
         break;
     case access_get_by_id_proto_list:
@@ -3598,11 +3598,11 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, to_object)
     return JSValue::encode(stackFrame.args[0].jsValue().toObject(callFrame));
 }
 
-MacroAssemblerCodePtr JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerator generator)
+MacroAssemblerCodeRef JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerator generator)
 {
-    std::pair<CTIStubMap::iterator, bool> entry = m_ctiStubMap.add(generator, MacroAssemblerCodePtr());
+    std::pair<CTIStubMap::iterator, bool> entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef());
     if (entry.second)
-        entry.first->second = generator(globalData, m_executablePool.get());
+        entry.first->second = generator(globalData);
     return entry.first->second;
 }
 
@@ -3610,7 +3610,7 @@ NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFu
 {
     std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap->add(function, Weak<NativeExecutable>());
     if (!*entry.first->second)
-        entry.first->second.set(*globalData, NativeExecutable::create(*globalData, JIT::compileCTINativeCall(globalData, m_executablePool, function), function, ctiNativeConstruct(), callHostFunctionAsConstructor));
+        entry.first->second.set(*globalData, NativeExecutable::create(*globalData, JIT::compileCTINativeCall(globalData, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), callHostFunctionAsConstructor));
     return entry.first->second.get();
 }
 
@@ -3618,8 +3618,8 @@ NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFu
 {
     std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap->add(function, Weak<NativeExecutable>());
     if (!*entry.first->second) {
-        MacroAssemblerCodePtr code = globalData->canUseJIT() ? generator(globalData, m_executablePool.get()) : MacroAssemblerCodePtr();
-        entry.first->second.set(*globalData, NativeExecutable::create(*globalData, code, function, ctiNativeConstruct(), callHostFunctionAsConstructor));
+        MacroAssemblerCodeRef code = globalData->canUseJIT() ? generator(globalData) : MacroAssemblerCodeRef();
+        entry.first->second.set(*globalData, NativeExecutable::create(*globalData, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), callHostFunctionAsConstructor));
     }
     return entry.first->second.get();
 }
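
Both hostFunctionStub() overloads above now pass the construct thunk to NativeExecutable::create() as a MacroAssemblerCodeRef. ctiNativeConstruct() still yields a bare MacroAssemblerCodePtr into the trampoline block owned by JITThunks, so it is wrapped with createSelfManagedCodeRef(), producing a code ref that points at the memory without owning it. A hedged sketch of the two ownership modes, expressed with the WTF handle this patch introduces below; the allocator and pointer variables are illustrative only:

    // Hedged illustration; "metaAllocator" and "trampolineBase" are placeholders,
    // not names from this patch.
    RefPtr<WTF::MetaAllocatorHandle> owned = metaAllocator->allocate(64);
    // owned: when the last RefPtr dies, the bytes return to the allocator.

    RefPtr<WTF::MetaAllocatorHandle> borrowed =
        WTF::MetaAllocatorHandle::createSelfManagedHandle(trampolineBase, 64);
    // borrowed: m_allocator is 0, so destruction never frees anything; the
    // memory is assumed to be owned elsewhere (here, by the trampoline block).
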
index 8c527ec..e8a73c3 100644
@@ -294,7 +294,7 @@ namespace JSC {
         MacroAssemblerCodePtr ctiNativeConstruct() { return m_trampolineStructure.ctiNativeConstruct; }
         MacroAssemblerCodePtr ctiSoftModulo() { return m_trampolineStructure.ctiSoftModulo; }
 
-        MacroAssemblerCodePtr ctiStub(JSGlobalData* globalData, ThunkGenerator generator);
+        MacroAssemblerCodeRef ctiStub(JSGlobalData*, ThunkGenerator);
 
         NativeExecutable* hostFunctionStub(JSGlobalData*, NativeFunction);
         NativeExecutable* hostFunctionStub(JSGlobalData*, NativeFunction, ThunkGenerator);
@@ -302,11 +302,11 @@ namespace JSC {
         void clearHostFunctionStubs();
 
     private:
-        typedef HashMap<ThunkGenerator, MacroAssemblerCodePtr> CTIStubMap;
+        typedef HashMap<ThunkGenerator, MacroAssemblerCodeRef> CTIStubMap;
         CTIStubMap m_ctiStubMap;
         typedef HashMap<NativeFunction, Weak<NativeExecutable> > HostFunctionStubMap;
         OwnPtr<HostFunctionStubMap> m_hostFunctionStubMap;
-        RefPtr<ExecutablePool> m_executablePool;
+        RefPtr<ExecutableMemoryHandle> m_executableMemory;
 
         TrampolineStructure m_trampolineStructure;
     };
index 7ff55ea..afa8c23 100644
@@ -37,10 +37,9 @@ namespace JSC {
     class SpecializedThunkJIT : public JSInterfaceJIT {
     public:
         static const int ThisArgument = -1;
-        SpecializedThunkJIT(int expectedArgCount, JSGlobalData* globalData, ExecutablePool* pool)
+        SpecializedThunkJIT(int expectedArgCount, JSGlobalData* globalData)
             : m_expectedArgCount(expectedArgCount)
             , m_globalData(globalData)
-            , m_pool(pool)
         {
             // Check that we have the expected number of arguments
             m_failures.append(branch32(NotEqual, Address(callFrameRegister, RegisterFile::ArgumentCount * (int)sizeof(Register)), TrustedImm32(expectedArgCount + 1)));
@@ -134,13 +133,13 @@ namespace JSC {
             ret();
         }
         
-        MacroAssemblerCodePtr finalize(JSGlobalData& globalData, MacroAssemblerCodePtr fallback)
+        MacroAssemblerCodeRef finalize(JSGlobalData& globalData, MacroAssemblerCodePtr fallback)
         {
-            LinkBuffer patchBuffer(globalData, this, m_pool.get());
+            LinkBuffer patchBuffer(globalData, this);
             patchBuffer.link(m_failures, CodeLocationLabel(fallback));
             for (unsigned i = 0; i < m_calls.size(); i++)
                 patchBuffer.link(m_calls[i].first, m_calls[i].second);
-            return patchBuffer.finalizeCode().m_code;
+            return patchBuffer.finalizeCode();
         }
 
         // Assumes that the target function uses fpRegister0 as the first argument
@@ -174,7 +173,6 @@ namespace JSC {
         
         int m_expectedArgCount;
         JSGlobalData* m_globalData;
-        RefPtr<ExecutablePool> m_pool;
         MacroAssembler::JumpList m_failures;
         Vector<std::pair<Call, FunctionPtr> > m_calls;
     };
index 3bc6f5c..dcd4e31 100644
@@ -63,26 +63,26 @@ static void charToString(SpecializedThunkJIT& jit, JSGlobalData* globalData, Mac
     jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
 }
 
-MacroAssemblerCodePtr charCodeAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodeRef charCodeAtThunkGenerator(JSGlobalData* globalData)
 {
-    SpecializedThunkJIT jit(1, globalData, pool);
+    SpecializedThunkJIT jit(1, globalData);
     stringCharLoad(jit);
     jit.returnInt32(SpecializedThunkJIT::regT0);
     return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
 }
 
-MacroAssemblerCodePtr charAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodeRef charAtThunkGenerator(JSGlobalData* globalData)
 {
-    SpecializedThunkJIT jit(1, globalData, pool);
+    SpecializedThunkJIT jit(1, globalData);
     stringCharLoad(jit);
     charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
     jit.returnJSCell(SpecializedThunkJIT::regT0);
     return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
 }
 
-MacroAssemblerCodePtr fromCharCodeThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodeRef fromCharCodeThunkGenerator(JSGlobalData* globalData)
 {
-    SpecializedThunkJIT jit(1, globalData, pool);
+    SpecializedThunkJIT jit(1, globalData);
     // load char code
     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
     charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
@@ -90,11 +90,11 @@ MacroAssemblerCodePtr fromCharCodeThunkGenerator(JSGlobalData* globalData, Execu
     return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
 }
 
-MacroAssemblerCodePtr sqrtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodeRef sqrtThunkGenerator(JSGlobalData* globalData)
 {
-    SpecializedThunkJIT jit(1, globalData, pool);
+    SpecializedThunkJIT jit(1, globalData);
     if (!jit.supportsFloatingPointSqrt())
-        return globalData->jitStubs->ctiNativeCall();
+        return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
 
     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
     jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
@@ -178,12 +178,12 @@ defineUnaryDoubleOpWrapper(log);
 defineUnaryDoubleOpWrapper(floor);
 defineUnaryDoubleOpWrapper(ceil);
 
-MacroAssemblerCodePtr floorThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodeRef floorThunkGenerator(JSGlobalData* globalData)
 {
-    SpecializedThunkJIT jit(1, globalData, pool);
+    SpecializedThunkJIT jit(1, globalData);
     MacroAssembler::Jump nonIntJump;
     if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
-        return globalData->jitStubs->ctiNativeCall();
+        return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
     jit.returnInt32(SpecializedThunkJIT::regT0);
     nonIntJump.link(&jit);
@@ -197,11 +197,11 @@ MacroAssemblerCodePtr floorThunkGenerator(JSGlobalData* globalData, ExecutablePo
     return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
 }
 
-MacroAssemblerCodePtr ceilThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodeRef ceilThunkGenerator(JSGlobalData* globalData)
 {
-    SpecializedThunkJIT jit(1, globalData, pool);
+    SpecializedThunkJIT jit(1, globalData);
     if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
-        return globalData->jitStubs->ctiNativeCall();
+        return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
     MacroAssembler::Jump nonIntJump;
     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
     jit.returnInt32(SpecializedThunkJIT::regT0);
@@ -220,11 +220,11 @@ static const double negativeZeroConstant = -0.0;
 static const double oneConstant = 1.0;
 static const double negativeHalfConstant = -0.5;
     
-MacroAssemblerCodePtr roundThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodeRef roundThunkGenerator(JSGlobalData* globalData)
 {
-    SpecializedThunkJIT jit(1, globalData, pool);
+    SpecializedThunkJIT jit(1, globalData);
     if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
-        return globalData->jitStubs->ctiNativeCall();
+        return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
     MacroAssembler::Jump nonIntJump;
     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
     jit.returnInt32(SpecializedThunkJIT::regT0);
@@ -239,37 +239,37 @@ MacroAssemblerCodePtr roundThunkGenerator(JSGlobalData* globalData, ExecutablePo
     return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
 }
 
-MacroAssemblerCodePtr expThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodeRef expThunkGenerator(JSGlobalData* globalData)
 {
     if (!UnaryDoubleOpWrapper(exp))
-        return globalData->jitStubs->ctiNativeCall();
-    SpecializedThunkJIT jit(1, globalData, pool);
+        return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+    SpecializedThunkJIT jit(1, globalData);
     if (!jit.supportsFloatingPoint())
-        return globalData->jitStubs->ctiNativeCall();
+        return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
     jit.callDoubleToDouble(UnaryDoubleOpWrapper(exp));
     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
     return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
 }
 
-MacroAssemblerCodePtr logThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodeRef logThunkGenerator(JSGlobalData* globalData)
 {
     if (!UnaryDoubleOpWrapper(log))
-        return globalData->jitStubs->ctiNativeCall();
-    SpecializedThunkJIT jit(1, globalData, pool);
+        return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
+    SpecializedThunkJIT jit(1, globalData);
     if (!jit.supportsFloatingPoint())
-        return globalData->jitStubs->ctiNativeCall();
+        return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
     jit.callDoubleToDouble(UnaryDoubleOpWrapper(log));
     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
     return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
 }
 
-MacroAssemblerCodePtr absThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodeRef absThunkGenerator(JSGlobalData* globalData)
 {
-    SpecializedThunkJIT jit(1, globalData, pool);
+    SpecializedThunkJIT jit(1, globalData);
     if (!jit.supportsDoubleBitops())
-        return globalData->jitStubs->ctiNativeCall();
+        return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
     MacroAssembler::Jump nonIntJump;
     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
     jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
@@ -286,11 +286,11 @@ MacroAssemblerCodePtr absThunkGenerator(JSGlobalData* globalData, ExecutablePool
     return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall());
 }
 
-MacroAssemblerCodePtr powThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+MacroAssemblerCodeRef powThunkGenerator(JSGlobalData* globalData)
 {
-    SpecializedThunkJIT jit(2, globalData, pool);
+    SpecializedThunkJIT jit(2, globalData);
     if (!jit.supportsFloatingPoint())
-        return globalData->jitStubs->ctiNativeCall();
+        return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall());
 
     jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
index 6a69ce2..b251f6b 100644
@@ -31,20 +31,20 @@ namespace JSC {
     class ExecutablePool;
     class JSGlobalData;
     class NativeExecutable;
-    class MacroAssemblerCodePtr;
+    class MacroAssemblerCodeRef;
 
-    typedef MacroAssemblerCodePtr (*ThunkGenerator)(JSGlobalData*, ExecutablePool*);
-    MacroAssemblerCodePtr charCodeAtThunkGenerator(JSGlobalData*, ExecutablePool*);
-    MacroAssemblerCodePtr charAtThunkGenerator(JSGlobalData*, ExecutablePool*);
-    MacroAssemblerCodePtr fromCharCodeThunkGenerator(JSGlobalData*, ExecutablePool*);
-    MacroAssemblerCodePtr absThunkGenerator(JSGlobalData*, ExecutablePool*);
-    MacroAssemblerCodePtr ceilThunkGenerator(JSGlobalData*, ExecutablePool*);
-    MacroAssemblerCodePtr expThunkGenerator(JSGlobalData*, ExecutablePool*);
-    MacroAssemblerCodePtr floorThunkGenerator(JSGlobalData*, ExecutablePool*);
-    MacroAssemblerCodePtr logThunkGenerator(JSGlobalData*, ExecutablePool*);
-    MacroAssemblerCodePtr roundThunkGenerator(JSGlobalData*, ExecutablePool*);
-    MacroAssemblerCodePtr sqrtThunkGenerator(JSGlobalData*, ExecutablePool*);
-    MacroAssemblerCodePtr powThunkGenerator(JSGlobalData*, ExecutablePool*);
+    typedef MacroAssemblerCodeRef (*ThunkGenerator)(JSGlobalData*);
+    MacroAssemblerCodeRef charCodeAtThunkGenerator(JSGlobalData*);
+    MacroAssemblerCodeRef charAtThunkGenerator(JSGlobalData*);
+    MacroAssemblerCodeRef fromCharCodeThunkGenerator(JSGlobalData*);
+    MacroAssemblerCodeRef absThunkGenerator(JSGlobalData*);
+    MacroAssemblerCodeRef ceilThunkGenerator(JSGlobalData*);
+    MacroAssemblerCodeRef expThunkGenerator(JSGlobalData*);
+    MacroAssemblerCodeRef floorThunkGenerator(JSGlobalData*);
+    MacroAssemblerCodeRef logThunkGenerator(JSGlobalData*);
+    MacroAssemblerCodeRef roundThunkGenerator(JSGlobalData*);
+    MacroAssemblerCodeRef sqrtThunkGenerator(JSGlobalData*);
+    MacroAssemblerCodeRef powThunkGenerator(JSGlobalData*);
 }
 #endif
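
With the ExecutablePool* parameter gone, each thunk generator above allocates its own executable chunk through the LinkBuffer and hands back a MacroAssemblerCodeRef that keeps that chunk alive. Callers go through JSGlobalData::getCTIStub() (see the JSGlobalData.h hunk below), which memoizes one ref per generator in JITThunks::ctiStub(). A hedged usage sketch; the variable names are illustrative:

    MacroAssemblerCodeRef sqrtThunk = globalData->getCTIStub(sqrtThunkGenerator);
    MacroAssemblerCodePtr entry = sqrtThunk.code(); // address to jump to; holding
                                                    // the ref pins the underlying memory
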
 
index 6bf6425..cb7ccb0 100644
@@ -173,7 +173,7 @@ namespace JSC {
         typedef ExecutableBase Base;
 
 #if ENABLE(JIT)
-        static NativeExecutable* create(JSGlobalData& globalData, MacroAssemblerCodePtr callThunk, NativeFunction function, MacroAssemblerCodePtr constructThunk, NativeFunction constructor)
+        static NativeExecutable* create(JSGlobalData& globalData, MacroAssemblerCodeRef callThunk, NativeFunction function, MacroAssemblerCodeRef constructThunk, NativeFunction constructor)
         {
             NativeExecutable* executable;
             if (!callThunk) {
index 8aa151f..af5a422 100644
 #include "config.h"
 #include "InitializeThreading.h"
 
+#include "ExecutableAllocator.h"
 #include "Heap.h"
 #include "Identifier.h"
 #include "JSGlobalObject.h"
 #include "UString.h"
 #include "WriteBarrier.h"
+#include "dtoa.h"
 #include <wtf/DateMath.h>
 #include <wtf/Threading.h>
 #include <wtf/dtoa/cached-powers.h>
@@ -54,6 +56,7 @@ static void initializeThreadingOnce()
     WriteBarrierCounters::initialize();
 #endif
     JSGlobalData::storeVPtrs();
+    ExecutableAllocator::initializeAllocator();
 #if ENABLE(JSC_MULTIPLE_THREADS)
     RegisterFile::initializeThreading();
 #endif
index beed7fe..2bbc6e3 100644
@@ -186,7 +186,6 @@ JSGlobalData::JSGlobalData(GlobalDataType globalDataType, ThreadStackType thread
     , emptyList(new MarkedArgumentBuffer)
 #if ENABLE(ASSEMBLER)
     , executableAllocator(*this)
-    , regexAllocator(*this)
 #endif
     , lexer(new Lexer(this))
     , parser(new Parser)
@@ -440,6 +439,9 @@ void JSGlobalData::stopSampling()
 void JSGlobalData::dumpSampleData(ExecState* exec)
 {
     interpreter->dumpSampleData(exec);
+#if ENABLE(ASSEMBLER)
+    ExecutableAllocator::dumpProfile();
+#endif
 }
 
 void JSGlobalData::recompileAllJSFunctions()
index 445b970..321bc96 100644
@@ -193,7 +193,6 @@ namespace JSC {
         
 #if ENABLE(ASSEMBLER)
         ExecutableAllocator executableAllocator;
-        ExecutableAllocator regexAllocator;
 #endif
 
 #if !ENABLE(JIT)
@@ -216,7 +215,7 @@ namespace JSC {
         Interpreter* interpreter;
 #if ENABLE(JIT)
         OwnPtr<JITThunks> jitStubs;
-        MacroAssemblerCodePtr getCTIStub(ThunkGenerator generator)
+        MacroAssemblerCodeRef getCTIStub(ThunkGenerator generator)
         {
             return jitStubs->ctiStub(this, generator);
         }
index c004440..9d3c4e7 100644
@@ -42,6 +42,9 @@ SET(WTF_HEADERS
     MallocZoneSupport.h
     MathExtras.h
     MessageQueue.h
+    MetaAllocator.cpp
+    MetaAllocator.h
+    MetaAllocatorHandle.h
     NonCopyingSort.h
     ThreadRestrictionVerifier.h
     Noncopyable.h
@@ -69,6 +72,7 @@ SET(WTF_HEADERS
     PossiblyNull.h
     RandomNumber.h
     RandomNumberSeed.h
+    RedBlackTree.h
     RefCounted.h
     RefCountedLeakCounter.h
     RefPtr.h
diff --git a/Source/JavaScriptCore/wtf/MetaAllocator.cpp b/Source/JavaScriptCore/wtf/MetaAllocator.cpp
new file mode 100644
index 0000000..5d1fcf0
--- /dev/null
@@ -0,0 +1,415 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer. 
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution. 
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MetaAllocator.h"
+
+#include <wtf/FastMalloc.h>
+
+namespace WTF {
+
+MetaAllocatorHandle::MetaAllocatorHandle(MetaAllocator* allocator, void* start, size_t sizeInBytes)
+    : m_allocator(allocator)
+    , m_start(start)
+    , m_sizeInBytes(sizeInBytes)
+{
+    ASSERT(allocator);
+    ASSERT(start);
+    ASSERT(sizeInBytes);
+}
+
+MetaAllocatorHandle::~MetaAllocatorHandle()
+{
+    if (!m_allocator)
+        return;
+    SpinLockHolder locker(&m_allocator->m_lock);
+    if (m_sizeInBytes) {
+        m_allocator->decrementPageOccupancy(m_start, m_sizeInBytes);
+        m_allocator->addFreeSpaceFromReleasedHandle(m_start, m_sizeInBytes);
+    }
+}
+
+void MetaAllocatorHandle::shrink(size_t newSizeInBytes)
+{
+    ASSERT(newSizeInBytes <= m_sizeInBytes);
+    
+    if (!m_allocator) {
+        m_sizeInBytes = newSizeInBytes;
+        return;
+    }
+    
+    SpinLockHolder locker(&m_allocator->m_lock);
+
+    newSizeInBytes = m_allocator->roundUp(newSizeInBytes);
+    
+    ASSERT(newSizeInBytes <= m_sizeInBytes);
+    
+    if (newSizeInBytes == m_sizeInBytes)
+        return;
+    
+    uintptr_t freeStart = reinterpret_cast<uintptr_t>(m_start) + newSizeInBytes;
+    size_t freeSize = m_sizeInBytes - newSizeInBytes;
+    uintptr_t freeEnd = freeStart + freeSize;
+    
+    uintptr_t firstCompletelyFreePage = (freeStart + m_allocator->m_pageSize - 1) & ~(m_allocator->m_pageSize - 1);
+    if (firstCompletelyFreePage < freeEnd)
+        m_allocator->decrementPageOccupancy(reinterpret_cast<void*>(firstCompletelyFreePage), freeSize - (firstCompletelyFreePage - freeStart));
+    
+    m_allocator->addFreeSpaceFromReleasedHandle(reinterpret_cast<void*>(freeStart), freeSize);
+    
+    m_sizeInBytes = newSizeInBytes;
+}
+
+MetaAllocator::MetaAllocator(size_t allocationGranule)
+    : m_allocationGranule(allocationGranule)
+    , m_pageSize(pageSize())
+    , m_bytesAllocated(0)
+    , m_bytesReserved(0)
+    , m_bytesCommitted(0)
+#ifndef NDEBUG
+    , m_mallocBalance(0)
+#endif
+#if ENABLE(META_ALLOCATOR_PROFILE)
+    , m_numAllocations(0)
+    , m_numFrees(0)
+#endif
+{
+    m_lock.Init();
+    
+    for (m_logPageSize = 0; m_logPageSize < 32; ++m_logPageSize) {
+        if (static_cast<size_t>(1) << m_logPageSize == m_pageSize)
+            break;
+    }
+    
+    ASSERT(static_cast<size_t>(1) << m_logPageSize == m_pageSize);
+    
+    for (m_logAllocationGranule = 0; m_logAllocationGranule < 32; ++m_logAllocationGranule) {
+        if (static_cast<size_t>(1) << m_logAllocationGranule == m_allocationGranule)
+            break;
+    }
+    
+    ASSERT(static_cast<size_t>(1) << m_logAllocationGranule == m_allocationGranule);
+}
+
+PassRefPtr<MetaAllocatorHandle> MetaAllocator::allocate(size_t sizeInBytes)
+{
+    SpinLockHolder locker(&m_lock);
+
+    if (!sizeInBytes)
+        return 0;
+    
+    sizeInBytes = roundUp(sizeInBytes);
+    
+    void* start = findAndRemoveFreeSpace(sizeInBytes);
+    if (!start) {
+        size_t requestedNumberOfPages = (sizeInBytes + m_pageSize - 1) >> m_logPageSize;
+        size_t numberOfPages = requestedNumberOfPages;
+        
+        start = allocateNewSpace(numberOfPages);
+        if (!start)
+            return 0;
+        
+        ASSERT(numberOfPages >= requestedNumberOfPages);
+        
+        size_t roundedUpSize = numberOfPages << m_logPageSize;
+        
+        ASSERT(roundedUpSize >= sizeInBytes);
+        
+        m_bytesReserved += roundedUpSize;
+        
+        if (roundedUpSize > sizeInBytes) {
+            void* freeSpaceStart = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(start) + sizeInBytes);
+            size_t freeSpaceSize = roundedUpSize - sizeInBytes;
+            addFreeSpace(freeSpaceStart, freeSpaceSize);
+        }
+    }
+    incrementPageOccupancy(start, sizeInBytes);
+    m_bytesAllocated += sizeInBytes;
+#if ENABLE(META_ALLOCATOR_PROFILE)
+    m_numAllocations++;
+#endif
+    return adoptRef(new MetaAllocatorHandle(this, start, sizeInBytes));
+}
+
+MetaAllocator::Statistics MetaAllocator::currentStatistics()
+{
+    SpinLockHolder locker(&m_lock);
+    Statistics result;
+    result.bytesAllocated = m_bytesAllocated;
+    result.bytesReserved = m_bytesReserved;
+    result.bytesCommitted = m_bytesCommitted;
+    return result;
+}
+
+void* MetaAllocator::findAndRemoveFreeSpace(size_t sizeInBytes)
+{
+    FreeSpaceNode* node = m_freeSpaceSizeMap.findLeastGreaterThanOrEqual(sizeInBytes);
+    
+    if (!node)
+        return 0;
+    
+    ASSERT(node->m_key >= sizeInBytes);
+    
+    m_freeSpaceSizeMap.remove(node);
+    
+    void* result;
+    
+    if (node->m_key == sizeInBytes) {
+        // Easy case: perfect fit, so just remove the node entirely.
+        result = node->m_value;
+        
+        m_freeSpaceStartAddressMap.remove(node->m_value);
+        m_freeSpaceEndAddressMap.remove(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_value) + node->m_key));
+        freeFreeSpaceNode(node);
+    } else {
+        // Try to be a good citizen and ensure that the returned chunk of memory
+        // straddles as few pages as possible, but only insofar as doing so will
+        // not increase fragmentation. The intuition is that minimizing
+        // fragmentation is a strictly higher priority than minimizing the number
+        // of committed pages, since in the long run, smaller fragmentation means
+        // fewer committed pages and fewer failures in general.
+        
+        uintptr_t firstPage = reinterpret_cast<uintptr_t>(node->m_value) >> m_logPageSize;
+        uintptr_t lastPage = (reinterpret_cast<uintptr_t>(node->m_value) + node->m_key - 1) >> m_logPageSize;
+    
+        uintptr_t lastPageForLeftAllocation = (reinterpret_cast<uintptr_t>(node->m_value) + sizeInBytes - 1) >> m_logPageSize;
+        uintptr_t firstPageForRightAllocation = (reinterpret_cast<uintptr_t>(node->m_value) + node->m_key - sizeInBytes) >> m_logPageSize;
+        
+        if (lastPageForLeftAllocation - firstPage + 1 <= lastPage - firstPageForRightAllocation + 1) {
+            // Allocate in the left side of the returned chunk, and slide the node to the right.
+            result = node->m_value;
+            
+            m_freeSpaceStartAddressMap.remove(node->m_value);
+            
+            node->m_key -= sizeInBytes;
+            node->m_value = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_value) + sizeInBytes);
+            
+            m_freeSpaceSizeMap.insert(node);
+            m_freeSpaceStartAddressMap.add(node->m_value, node);
+        } else {
+            // Allocate in the right side of the returned chunk, and slide the node to the left.
+            
+            result = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_value) + node->m_key - sizeInBytes);
+            
+            m_freeSpaceEndAddressMap.remove(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_value) + node->m_key));
+            
+            node->m_key -= sizeInBytes;
+            
+            m_freeSpaceSizeMap.insert(node);
+            m_freeSpaceEndAddressMap.add(result, node);
+        }
+    }
+    
+    return result;
+}
+
+void MetaAllocator::addFreeSpaceFromReleasedHandle(void* start, size_t sizeInBytes)
+{
+#if ENABLE(META_ALLOCATOR_PROFILE)
+    m_numFrees++;
+#endif
+    m_bytesAllocated -= sizeInBytes;
+    addFreeSpace(start, sizeInBytes);
+}
+
+void MetaAllocator::addFreshFreeSpace(void* start, size_t sizeInBytes)
+{
+    SpinLockHolder locker(&m_lock);
+    m_bytesReserved += sizeInBytes;
+    addFreeSpace(start, sizeInBytes);
+}
+
+size_t MetaAllocator::debugFreeSpaceSize()
+{
+#ifndef NDEBUG
+    SpinLockHolder locker(&m_lock);
+    size_t result = 0;
+    for (FreeSpaceNode* node = m_freeSpaceSizeMap.first(); node; node = node->successor())
+        result += node->m_key;
+    return result;
+#else
+    CRASH();
+    return 0;
+#endif
+}
+
+void MetaAllocator::addFreeSpace(void* start, size_t sizeInBytes)
+{
+    void* end = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(start) + sizeInBytes);
+    
+    HashMap<void*, FreeSpaceNode*>::iterator leftNeighbor = m_freeSpaceEndAddressMap.find(start);
+    HashMap<void*, FreeSpaceNode*>::iterator rightNeighbor = m_freeSpaceStartAddressMap.find(end);
+    
+    if (leftNeighbor != m_freeSpaceEndAddressMap.end()) {
+        // We have something we can coalesce with on the left. Remove it from the tree, and
+        // remove its end from the end address map.
+        
+        ASSERT(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(leftNeighbor->second->m_value) + leftNeighbor->second->m_key) == leftNeighbor->first);
+        
+        FreeSpaceNode* leftNode = leftNeighbor->second;
+        
+        void* leftStart = leftNode->m_value;
+        size_t leftSize = leftNode->m_key;
+        void* leftEnd = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(leftStart) + leftSize);
+        
+        ASSERT(leftEnd == start);
+        
+        m_freeSpaceSizeMap.remove(leftNode);
+        m_freeSpaceEndAddressMap.remove(leftEnd);
+        
+        // Now check if there is also something to coalesce with on the right.
+        if (rightNeighbor != m_freeSpaceStartAddressMap.end()) {
+            // Freeing something in the middle of free blocks. Coalesce both left and
+            // right, whilst removing the right neighbor from the maps.
+            
+            ASSERT(rightNeighbor->second->m_value == rightNeighbor->first);
+            
+            FreeSpaceNode* rightNode = rightNeighbor->second;
+            void* rightStart = rightNeighbor->first;
+            size_t rightSize = rightNode->m_key;
+            void* rightEnd = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(rightStart) + rightSize);
+            
+            ASSERT(rightStart == end);
+            ASSERT(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(leftStart) + leftSize + sizeInBytes + rightSize) == rightEnd);
+            
+            m_freeSpaceSizeMap.remove(rightNode);
+            m_freeSpaceStartAddressMap.remove(rightStart);
+            m_freeSpaceEndAddressMap.remove(rightEnd);
+            
+            freeFreeSpaceNode(rightNode);
+            
+            leftNode->m_key += sizeInBytes + rightSize;
+            
+            m_freeSpaceSizeMap.insert(leftNode);
+            m_freeSpaceEndAddressMap.add(rightEnd, leftNode);
+        } else {
+            leftNode->m_key += sizeInBytes;
+            
+            m_freeSpaceSizeMap.insert(leftNode);
+            m_freeSpaceEndAddressMap.add(end, leftNode);
+        }
+    } else {
+        // Cannot coalesce with left; try to see if we can coalesce with right.
+        
+        if (rightNeighbor != m_freeSpaceStartAddressMap.end()) {
+            FreeSpaceNode* rightNode = rightNeighbor->second;
+            void* rightStart = rightNeighbor->first;
+            size_t rightSize = rightNode->m_key;
+            void* rightEnd = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(rightStart) + rightSize);
+            
+            ASSERT(rightStart == end);
+            ASSERT_UNUSED(rightEnd, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(start) + sizeInBytes + rightSize) == rightEnd);
+            
+            m_freeSpaceSizeMap.remove(rightNode);
+            m_freeSpaceStartAddressMap.remove(rightStart);
+            
+            rightNode->m_key += sizeInBytes;
+            rightNode->m_value = start;
+            
+            m_freeSpaceSizeMap.insert(rightNode);
+            m_freeSpaceStartAddressMap.add(start, rightNode);
+        } else {
+            // Nothing to coalesce with, so create a new free space node and add it.
+            
+            FreeSpaceNode* node = allocFreeSpaceNode();
+            
+            node->m_key = sizeInBytes;
+            node->m_value = start;
+            
+            m_freeSpaceSizeMap.insert(node);
+            m_freeSpaceStartAddressMap.add(start, node);
+            m_freeSpaceEndAddressMap.add(end, node);
+        }
+    }
+}
+
+void MetaAllocator::incrementPageOccupancy(void* address, size_t sizeInBytes)
+{
+    uintptr_t firstPage = reinterpret_cast<uintptr_t>(address) >> m_logPageSize;
+    uintptr_t lastPage = (reinterpret_cast<uintptr_t>(address) + sizeInBytes - 1) >> m_logPageSize;
+    
+    for (uintptr_t page = firstPage; page <= lastPage; ++page) {
+        HashMap<uintptr_t, size_t>::iterator iter = m_pageOccupancyMap.find(page);
+        if (iter == m_pageOccupancyMap.end()) {
+            m_pageOccupancyMap.add(page, 1);
+            m_bytesCommitted += m_pageSize;
+            notifyNeedPage(reinterpret_cast<void*>(page << m_logPageSize));
+        } else
+            iter->second++;
+    }
+}
+
+void MetaAllocator::decrementPageOccupancy(void* address, size_t sizeInBytes)
+{
+    uintptr_t firstPage = reinterpret_cast<uintptr_t>(address) >> m_logPageSize;
+    uintptr_t lastPage = (reinterpret_cast<uintptr_t>(address) + sizeInBytes - 1) >> m_logPageSize;
+    
+    for (uintptr_t page = firstPage; page <= lastPage; ++page) {
+        HashMap<uintptr_t, size_t>::iterator iter = m_pageOccupancyMap.find(page);
+        ASSERT(iter != m_pageOccupancyMap.end());
+        if (!--(iter->second)) {
+            m_pageOccupancyMap.remove(iter);
+            m_bytesCommitted -= m_pageSize;
+            notifyPageIsFree(reinterpret_cast<void*>(page << m_logPageSize));
+        }
+    }
+}
+
+size_t MetaAllocator::roundUp(size_t sizeInBytes)
+{
+    if (std::numeric_limits<size_t>::max() - m_allocationGranule <= sizeInBytes)
+        CRASH();
+    return (sizeInBytes + m_allocationGranule - 1) & ~(m_allocationGranule - 1);
+}
+
+MetaAllocator::FreeSpaceNode* MetaAllocator::allocFreeSpaceNode()
+{
+#ifndef NDEBUG
+    m_mallocBalance++;
+#endif
+    return new (fastMalloc(sizeof(FreeSpaceNode))) FreeSpaceNode(0, 0);
+}
+
+void MetaAllocator::freeFreeSpaceNode(FreeSpaceNode* node)
+{
+#ifndef NDEBUG
+    m_mallocBalance--;
+#endif
+    fastFree(node);
+}
+
+#if ENABLE(META_ALLOCATOR_PROFILE)
+void MetaAllocator::dumpProfile()
+{
+    printf("num allocations = %u, num frees = %u\n", m_numAllocations, m_numFrees);
+}
+#endif
+
+} // namespace WTF
+
+
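
Taken together, MetaAllocator.cpp implements a best-fit scheme: findAndRemoveFreeSpace() asks a red-black tree keyed by chunk size for the smallest chunk that fits, addFreeSpace() coalesces freed chunks with their neighbors via the start/end address hash maps, and incrementPageOccupancy()/decrementPageOccupancy() keep a per-page reference count so that notifyNeedPage()/notifyPageIsFree() fire exactly on the 0-to-1 and 1-to-0 transitions. A hedged sketch of a minimal subclass operating over a fixed, pre-committed pool, roughly the shape a unit test (or a fixed VM pool executable allocator) would take; every name below except the MetaAllocator interface itself is illustrative:

    // Hedged sketch; FixedPoolAllocator, poolBase and poolSize are placeholders.
    class FixedPoolAllocator : public WTF::MetaAllocator {
    public:
        FixedPoolAllocator(void* pool, size_t sizeInBytes)
            : WTF::MetaAllocator(32) // 32-byte allocation granule (must be a power of two)
        {
            // Operate entirely within the caller-provided pool.
            addFreshFreeSpace(pool, sizeInBytes);
        }

    protected:
        virtual void* allocateNewSpace(size_t&) { return 0; } // fixed pool: refuse to grow
        virtual void notifyNeedPage(void*) { }                // pages are already committed
        virtual void notifyPageIsFree(void*) { }              // nothing to decommit
    };

    // Handles are reference counted; releasing the last reference runs
    // ~MetaAllocatorHandle, which coalesces the bytes back into the free tree:
    //     FixedPoolAllocator allocator(poolBase, poolSize);
    //     RefPtr<WTF::MetaAllocatorHandle> handle = allocator.allocate(128);
    //     handle->shrink(64); // return the tail without releasing the handle
    //     handle.clear();     // last reference gone: the remaining bytes go back to the pool
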
diff --git a/Source/JavaScriptCore/wtf/MetaAllocator.h b/Source/JavaScriptCore/wtf/MetaAllocator.h
new file mode 100644
index 0000000..cf971b7
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer. 
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution. 
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_MetaAllocator_h
+#define WTF_MetaAllocator_h
+
+#include "Assertions.h"
+#include "HashMap.h"
+#include "MetaAllocatorHandle.h"
+#include "Noncopyable.h"
+#include "PageBlock.h"
+#include "RedBlackTree.h"
+#include "RefCounted.h"
+#include "RefPtr.h"
+#include "TCSpinLock.h"
+
+namespace WTF {
+
+#define ENABLE_META_ALLOCATOR_PROFILE 0
+
+class MetaAllocator {
+    WTF_MAKE_NONCOPYABLE(MetaAllocator);
+public:
+    
+    MetaAllocator(size_t allocationGranule);
+    
+    virtual ~MetaAllocator();
+    
+    PassRefPtr<MetaAllocatorHandle> allocate(size_t sizeInBytes);
+    
+    // Non-atomic methods for getting allocator statistics.
+    size_t bytesAllocated() { return m_bytesAllocated; }
+    size_t bytesReserved() { return m_bytesReserved; }
+    size_t bytesCommitted() { return m_bytesCommitted; }
+    
+    // Atomic method for getting allocator statistics.
+    struct Statistics {
+        size_t bytesAllocated;
+        size_t bytesReserved;
+        size_t bytesCommitted;
+    };
+    Statistics currentStatistics();
+
+    // Add more free space to the allocator. Call this directly from
+    // the constructor if you wish to operate the allocator within a
+    // fixed pool.
+    void addFreshFreeSpace(void* start, size_t sizeInBytes);
+
+    // This is meant only for implementing tests. Never call this in release
+    // builds.
+    size_t debugFreeSpaceSize();
+    
+#if ENABLE(META_ALLOCATOR_PROFILE)
+    void dumpProfile();
+#else
+    void dumpProfile() { }
+#endif
+
+protected:
+    
+    // Allocate new virtual space, but don't commit. This may return more
+    // pages than we asked for, in which case numPages is updated.
+    virtual void* allocateNewSpace(size_t& numPages) = 0;
+    
+    // Commit a page.
+    virtual void notifyNeedPage(void* page) = 0;
+    
+    // Uncommit a page.
+    virtual void notifyPageIsFree(void* page) = 0;
+    
+    // NOTE: none of the above methods are called during allocator
+    // destruction, in part because a MetaAllocator cannot die so long
+    // as there are Handles that refer to it.
+
+private:
+    
+    friend class MetaAllocatorHandle;
+    
+    typedef RedBlackTree<size_t, void*> Tree;
+    typedef Tree::Node FreeSpaceNode;
+    
+    // Remove free space from the allocator. This is effectively
+    // the allocate() function, except that it does not mark the
+    // returned space as being in-use.
+    void* findAndRemoveFreeSpace(size_t sizeInBytes);
+
+    // This is called when memory from an allocation is freed.
+    void addFreeSpaceFromReleasedHandle(void* start, size_t sizeInBytes);
+    
+    // This is the low-level implementation of adding free space; it
+    // is called from both addFreeSpaceFromReleasedHandle and from
+    // addFreshFreeSpace.
+    void addFreeSpace(void* start, size_t sizeInBytes);
+    
+    // Management of used space.
+    
+    void incrementPageOccupancy(void* address, size_t sizeInBytes);
+    void decrementPageOccupancy(void* address, size_t sizeInBytes);
+    
+    // Utilities.
+    
+    size_t roundUp(size_t sizeInBytes);
+    
+    FreeSpaceNode* allocFreeSpaceNode();
+    void freeFreeSpaceNode(FreeSpaceNode*);
+    
+    size_t m_allocationGranule;
+    unsigned m_logAllocationGranule;
+    size_t m_pageSize;
+    unsigned m_logPageSize;
+    
+    Tree m_freeSpaceSizeMap;
+    HashMap<void*, FreeSpaceNode*> m_freeSpaceStartAddressMap;
+    HashMap<void*, FreeSpaceNode*> m_freeSpaceEndAddressMap;
+    HashMap<uintptr_t, size_t> m_pageOccupancyMap;
+    
+    size_t m_bytesAllocated;
+    size_t m_bytesReserved;
+    size_t m_bytesCommitted;
+    
+    SpinLock m_lock;
+
+#ifndef NDEBUG
+    size_t m_mallocBalance;
+#endif
+
+#if ENABLE(META_ALLOCATOR_PROFILE)
+    unsigned m_numAllocations;
+    unsigned m_numFrees;
+#endif
+};
+
+inline MetaAllocator::~MetaAllocator()
+{
+    for (FreeSpaceNode* node = m_freeSpaceSizeMap.first(); node;) {
+        FreeSpaceNode* next = node->successor();
+        m_freeSpaceSizeMap.remove(node);
+        freeFreeSpaceNode(node);
+        node = next;
+    }
+#ifndef NDEBUG
+    ASSERT(!m_mallocBalance);
+#endif
+}
+
+} // namespace WTF
+
+#endif // WTF_MetaAllocator_h
+
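
The header exposes statistics in two forms: the non-atomic getters for quick reads and currentStatistics() for a consistent snapshot under the spin lock, with debugFreeSpaceSize() reserved for tests. One invariant follows directly from allocate() and addFreeSpace() above: every reserved byte is either handed out or sitting in the free tree. A hedged, debug-only check of that invariant (the function name is illustrative):

    #ifndef NDEBUG
    static void checkMetaAllocatorBookkeeping(WTF::MetaAllocator& allocator)
    {
        WTF::MetaAllocator::Statistics stats = allocator.currentStatistics();
        // bytesReserved = bytesAllocated + free space, by construction.
        ASSERT(stats.bytesAllocated + allocator.debugFreeSpaceSize() == stats.bytesReserved);
    }
    #endif
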
diff --git a/Source/JavaScriptCore/wtf/MetaAllocatorHandle.h b/Source/JavaScriptCore/wtf/MetaAllocatorHandle.h
new file mode 100644
index 0000000..120f6bd
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer. 
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution. 
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WTF_MetaAllocatorHandle_h
+#define WTF_MetaAllocatorHandle_h
+
+#include <wtf/Assertions.h>
+#include <wtf/RefCounted.h>
+#include <wtf/RefPtr.h>
+
+namespace WTF {
+
+class MetaAllocator;
+
+class MetaAllocatorHandle: public RefCounted<MetaAllocatorHandle> {
+private:
+    MetaAllocatorHandle(MetaAllocator*, void* start, size_t sizeInBytes);
+    
+    MetaAllocatorHandle(void* start, size_t sizeInBytes)
+        : m_allocator(0)
+        , m_start(start)
+        , m_sizeInBytes(sizeInBytes)
+    {
+        ASSERT(start);
+    }
+    
+public:
+    ~MetaAllocatorHandle();
+    
+    static PassRefPtr<MetaAllocatorHandle> createSelfManagedHandle(void* start, size_t sizeInBytes)
+    {
+        return adoptRef(new MetaAllocatorHandle(start, sizeInBytes));
+    }
+    
+    void* start()
+    {
+        return m_start;
+    }
+    
+    void* end()
+    {
+        return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(m_start) + m_sizeInBytes);
+    }
+        
+    size_t sizeInBytes()
+    {
+        return m_sizeInBytes;
+    }
+        
+    void shrink(size_t newSizeInBytes);
+    
+    bool isManaged()
+    {
+        return !!m_allocator;
+    }
+        
+    MetaAllocator* allocator()
+    {
+        ASSERT(m_allocator);
+        return m_allocator;
+    }
+    
+private:
+    friend class MetaAllocator;
+    
+    MetaAllocator* m_allocator;
+    void* m_start;
+    size_t m_sizeInBytes;
+};
+
+}
+
+#endif
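
On the JIT side this handle is what the ChangeLog calls a reference counted handle to executable code; the JITThunks diff above stores it as RefPtr<ExecutableMemoryHandle>. The ExecutableAllocator.h hunk is not shown in this excerpt, so the exact wiring is an assumption, but a plausible sketch is a simple alias:

    // Assumption: ExecutableAllocator.h exposes the WTF handle under a
    // JIT-facing name, roughly:
    namespace JSC {
        typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle;
    }
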
diff --git a/Source/JavaScriptCore/wtf/RedBlackTree.h b/Source/JavaScriptCore/wtf/RedBlackTree.h
new file mode 100644
index 0000000..9efe270
--- /dev/null
@@ -0,0 +1,590 @@
+/*
+ * Copyright (C) 2010, 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RedBlackTree_h
+#define RedBlackTree_h
+
+#include <wtf/Assertions.h>
+#include <wtf/Noncopyable.h>
+
+namespace WTF {
+
+// This implements a red-black tree with the following properties:
+// - The allocation of nodes in the tree is entirely up to the user.
+// - If you are in possession of a pointer to a node, you can delete
+//   it from the tree. The tree will subsequently no longer have a
+//   reference to this node.
+// - The key type must implement operator< and ==.
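+//
+// A hedged usage sketch; the key/value types match MetaAllocator's
+// instantiation (RedBlackTree<size_t, void*>), but the variables are
+// illustrative only:
+//
+//     RedBlackTree<size_t, void*> tree;
+//     RedBlackTree<size_t, void*>::Node node(64, chunkStart); // caller owns the node's storage
+//     tree.insert(&node);
+//     // Best-fit style lookup: smallest key that is >= the request.
+//     RedBlackTree<size_t, void*>::Node* fit = tree.findLeastGreaterThanOrEqual(48);
+//     if (fit)
+//         tree.remove(fit); // unlink by node pointer; the tree never frees nodes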
+
+template<typename KeyType, typename ValueType>
+class RedBlackTree {
+    WTF_MAKE_NONCOPYABLE(RedBlackTree);
+private:
+    enum Color {
+        Red = 1,
+        Black
+    };
+    
+public:
+    class Node {
+        friend class RedBlackTree;
+        
+    public:
+        Node(KeyType key, ValueType value)
+            : m_key(key)
+            , m_value(value)
+        {
+        }
+        
+        const Node* successor() const
+        {
+            const Node* x = this;
+            if (x->right())
+                return treeMinimum(x->right());
+            const Node* y = x->parent();
+            while (y && x == y->right()) {
+                x = y;
+                y = y->parent();
+            }
+            return y;
+        }
+        
+        const Node* predecessor() const
+        {
+            const Node* x = this;
+            if (x->left())
+                return treeMaximum(x->left());
+            const Node* y = x->parent();
+            while (y && x == y->left()) {
+                x = y;
+                y = y->parent();
+            }
+            return y;
+        }
+        
+        Node* successor()
+        {
+            return const_cast<Node*>(const_cast<const Node*>(this)->successor());
+        }
+    
+        Node* predecessor()
+        {
+            return const_cast<Node*>(const_cast<const Node*>(this)->predecessor());
+        }
+    
+        KeyType m_key;
+        ValueType m_value;
+
+    private:
+        void reset()
+        {
+            m_left = 0;
+            m_right = 0;
+            m_parentAndRed = 1; // initialize to red
+        }
+        
+        // NOTE: these methods should pack the parent and red into a single
+        // word. But doing so appears to reveal a bug in the compiler.
+        Node* parent() const
+        {
+            return reinterpret_cast<Node*>(m_parentAndRed & ~static_cast<uintptr_t>(1));
+        }
+        
+        void setParent(Node* newParent)
+        {
+            m_parentAndRed = reinterpret_cast<uintptr_t>(newParent) | (m_parentAndRed & 1);
+        }
+        
+        Node* left() const
+        {
+            return m_left;
+        }
+        
+        void setLeft(Node* node)
+        {
+            m_left = node;
+        }
+        
+        Node* right() const
+        {
+            return m_right;
+        }
+        
+        void setRight(Node* node)
+        {
+            m_right = node;
+        }
+        
+        Color color() const
+        {
+            if (m_parentAndRed & 1)
+                return Red;
+            return Black;
+        }
+        
+        void setColor(Color value)
+        {
+            if (value == Red)
+                m_parentAndRed |= 1;
+            else
+                m_parentAndRed &= ~static_cast<uintptr_t>(1);
+        }
+        
+        Node* m_left;
+        Node* m_right;
+        uintptr_t m_parentAndRed;
+    };
+    
+    RedBlackTree()
+        : m_root(0)
+    {
+    }
+    
+    void insert(Node* x)
+    {
+        x->reset();
+        treeInsert(x);
+        x->setColor(Red);
+
+        // The node from which to start propagating updates upwards.
+        Node* updateStart = x->parent();
+
+        while (x != m_root && x->parent()->color() == Red) {
+            if (x->parent() == x->parent()->parent()->left()) {
+                Node* y = x->parent()->parent()->right();
+                if (y && y->color() == Red) {
+                    // Case 1
+                    x->parent()->setColor(Black);
+                    y->setColor(Black);
+                    x->parent()->parent()->setColor(Red);
+                    x = x->parent()->parent();
+                    updateStart = x->parent();
+                } else {
+                    if (x == x->parent()->right()) {
+                        // Case 2
+                        x = x->parent();
+                        leftRotate(x);
+                    }
+                    // Case 3
+                    x->parent()->setColor(Black);
+                    x->parent()->parent()->setColor(Red);
+                    Node* newSubTreeRoot = rightRotate(x->parent()->parent());
+                    updateStart = newSubTreeRoot->parent();
+                }
+            } else {
+                // Same as "then" clause with "right" and "left" exchanged.
+                Node* y = x->parent()->parent()->left();
+                if (y && y->color() == Red) {
+                    // Case 1
+                    x->parent()->setColor(Black);
+                    y->setColor(Black);
+                    x->parent()->parent()->setColor(Red);
+                    x = x->parent()->parent();
+                    updateStart = x->parent();
+                } else {
+                    if (x == x->parent()->left()) {
+                        // Case 2
+                        x = x->parent();
+                        rightRotate(x);
+                    }
+                    // Case 3
+                    x->parent()->setColor(Black);
+                    x->parent()->parent()->setColor(Red);
+                    Node* newSubTreeRoot = leftRotate(x->parent()->parent());
+                    updateStart = newSubTreeRoot->parent();
+                }
+            }
+        }
+
+        m_root->setColor(Black);
+    }
+
+    Node* remove(Node* z)
+    {
+        ASSERT(z);
+        ASSERT(z->parent() || z == m_root);
+        
+        // Y is the node to be unlinked from the tree.
+        Node* y;
+        if (!z->left() || !z->right())
+            y = z;
+        else
+            y = z->successor();
+
+        // Y is guaranteed to be non-null at this point.
+        Node* x;
+        if (y->left())
+            x = y->left();
+        else
+            x = y->right();
+
+        // X is the child of y which might potentially replace y in
+        // the tree. X might be null at this point.
+        Node* xParent;
+        if (x) {
+            x->setParent(y->parent());
+            xParent = x->parent();
+        } else
+            xParent = y->parent();
+        if (!y->parent())
+            m_root = x;
+        else {
+            if (y == y->parent()->left())
+                y->parent()->setLeft(x);
+            else
+                y->parent()->setRight(x);
+        }
+            
+        if (y != z) {
+            if (y->color() == Black)
+                removeFixup(x, xParent);
+            
+            y->setParent(z->parent());
+            y->setColor(z->color());
+            y->setLeft(z->left());
+            y->setRight(z->right());
+            
+            if (z->left())
+                z->left()->setParent(y);
+            if (z->right())
+                z->right()->setParent(y);
+            if (z->parent()) {
+                if (z->parent()->left() == z)
+                    z->parent()->setLeft(y);
+                else
+                    z->parent()->setRight(y);
+            } else {
+                ASSERT(m_root == z);
+                m_root = y;
+            }
+        } else if (y->color() == Black)
+            removeFixup(x, xParent);
+
+        return z;
+    }
+    
+    Node* remove(const KeyType& key)
+    {
+        Node* result = findExact(key);
+        if (!result)
+            return 0;
+        return remove(result);
+    }
+    
+    Node* findExact(const KeyType& key) const
+    {
+        for (Node* current = m_root; current;) {
+            if (current->m_key == key)
+                return current;
+            if (key < current->m_key)
+                current = current->left();
+            else
+                current = current->right();
+        }
+        return 0;
+    }
+    
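+    // Finds the node with the smallest key that is greater than or equal to
+    // the given key, or 0 if no such node exists.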
+    Node* findLeastGreaterThanOrEqual(const KeyType& key) const
+    {
+        Node* best = 0;
+        for (Node* current = m_root; current;) {
+            if (current->m_key == key)
+                return current;
+            if (current->m_key < key)
+                current = current->right();
+            else {
+                best = current;
+                current = current->left();
+            }
+        }
+        return best;
+    }
+    
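+    // Finds the node with the largest key that is less than or equal to
+    // the given key, or 0 if no such node exists.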
+    Node* findGreatestLessThanOrEqual(const KeyType& key) const
+    {
+        Node* best = 0;
+        for (Node* current = m_root; current;) {
+            if (current->m_key == key)
+                return current;
+            if (current->m_key > key)
+                current = current->left();
+            else {
+                best = current;
+                current = current->right();
+            }
+        }
+        return best;
+    }
+    
+    Node* first() const
+    {
+        if (!m_root)
+            return 0;
+        return treeMinimum(m_root);
+    }
+    
+    Node* last() const
+    {
+        if (!m_root)
+            return 0;
+        return treeMaximum(m_root);
+    }
+    
+    // This is an O(n) operation.
+    size_t size()
+    {
+        size_t result = 0;
+        for (Node* current = first(); current; current = current->successor())
+            result++;
+        return result;
+    }
+    
+    // This is an O(1) operation.
+    bool isEmpty()
+    {
+        return !m_root;
+    }
+    
+private:
+    // Finds the minimum element in the sub-tree rooted at the given
+    // node.
+    static Node* treeMinimum(Node* x)
+    {
+        while (x->left())
+            x = x->left();
+        return x;
+    }
+    
+    static Node* treeMaximum(Node* x)
+    {
+        while (x->right())
+            x = x->right();
+        return x;
+    }
+
+    static const Node* treeMinimum(const Node* x)
+    {
+        while (x->left())
+            x = x->left();
+        return x;
+    }
+    
+    static const Node* treeMaximum(const Node* x)
+    {
+        while (x->right())
+            x = x->right();
+        return x;
+    }
+
+    void treeInsert(Node* z)
+    {
+        ASSERT(!z->left());
+        ASSERT(!z->right());
+        ASSERT(!z->parent());
+        ASSERT(z->color() == Red);
+        
+        Node* y = 0;
+        Node* x = m_root;
+        while (x) {
+            y = x;
+            if (z->m_key < x->m_key)
+                x = x->left();
+            else
+                x = x->right();
+        }
+        z->setParent(y);
+        if (!y)
+            m_root = z;
+        else {
+            if (z->m_key < y->m_key)
+                y->setLeft(z);
+            else
+                y->setRight(z);
+        }
+    }
+
+    //----------------------------------------------------------------------
+    // Red-Black tree operations
+    //
+
+    // Left-rotates the subtree rooted at x.
+    // Returns the new root of the subtree (x's right child).
+    Node* leftRotate(Node* x)
+    {
+        // Set y.
+        Node* y = x->right();
+
+        // Turn y's left subtree into x's right subtree.
+        x->setRight(y->left());
+        if (y->left())
+            y->left()->setParent(x);
+
+        // Link x's parent to y.
+        y->setParent(x->parent());
+        if (!x->parent())
+            m_root = y;
+        else {
+            if (x == x->parent()->left())
+                x->parent()->setLeft(y);
+            else
+                x->parent()->setRight(y);
+        }
+
+        // Put x on y's left.
+        y->setLeft(x);
+        x->setParent(y);
+
+        return y;
+    }
+
+    // Right-rotates the subtree rooted at y.
+    // Returns the new root of the subtree (y's left child).
+    Node* rightRotate(Node* y)
+    {
+        // Set x.
+        Node* x = y->left();
+
+        // Turn x's right subtree into y's left subtree.
+        y->setLeft(x->right());
+        if (x->right())
+            x->right()->setParent(y);
+
+        // Link y's parent to x.
+        x->setParent(y->parent());
+        if (!y->parent())
+            m_root = x;
+        else {
+            if (y == y->parent()->left())
+                y->parent()->setLeft(x);
+            else
+                y->parent()->setRight(x);
+        }
+
+        // Put y on x's right.
+        x->setRight(y);
+        y->setParent(x);
+
+        return x;
+    }
+
+    // Restores the red-black property to the tree after splicing out
+    // a node. Note that x may be null, which is why xParent must be
+    // supplied.
+    void removeFixup(Node* x, Node* xParent)
+    {
+        while (x != m_root && (!x || x->color() == Black)) {
+            if (x == xParent->left()) {
+                // Note: the text points out that w cannot be null.
+                // The reason is not obvious from simply looking at
+                // the code; it comes about from the properties of the
+                // red-black tree.
+                Node* w = xParent->right();
+                ASSERT(w); // x's sibling should not be null.
+                if (w->color() == Red) {
+                    // Case 1
+                    w->setColor(Black);
+                    xParent->setColor(Red);
+                    leftRotate(xParent);
+                    w = xParent->right();
+                }
+                if ((!w->left() || w->left()->color() == Black)
+                    && (!w->right() || w->right()->color() == Black)) {
+                    // Case 2
+                    w->setColor(Red);
+                    x = xParent;
+                    xParent = x->parent();
+                } else {
+                    if (!w->right() || w->right()->color() == Black) {
+                        // Case 3
+                        w->left()->setColor(Black);
+                        w->setColor(Red);
+                        rightRotate(w);
+                        w = xParent->right();
+                    }
+                    // Case 4
+                    w->setColor(xParent->color());
+                    xParent->setColor(Black);
+                    if (w->right())
+                        w->right()->setColor(Black);
+                    leftRotate(xParent);
+                    x = m_root;
+                    xParent = x->parent();
+                }
+            } else {
+                // Same as "then" clause with "right" and "left"
+                // exchanged.
+
+                // Note: the text points out that w cannot be null.
+                // The reason is not obvious from simply looking at
+                // the code; it comes about from the properties of the
+                // red-black tree.
+                Node* w = xParent->left();
+                ASSERT(w); // x's sibling should not be null.
+                if (w->color() == Red) {
+                    // Case 1
+                    w->setColor(Black);
+                    xParent->setColor(Red);
+                    rightRotate(xParent);
+                    w = xParent->left();
+                }
+                if ((!w->right() || w->right()->color() == Black)
+                    && (!w->left() || w->left()->color() == Black)) {
+                    // Case 2
+                    w->setColor(Red);
+                    x = xParent;
+                    xParent = x->parent();
+                } else {
+                    if (!w->left() || w->left()->color() == Black) {
+                        // Case 3
+                        w->right()->setColor(Black);
+                        w->setColor(Red);
+                        leftRotate(w);
+                        w = xParent->left();
+                    }
+                    // Case 4
+                    w->setColor(xParent->color());
+                    xParent->setColor(Black);
+                    if (w->left())
+                        w->left()->setColor(Black);
+                    rightRotate(xParent);
+                    x = m_root;
+                    xParent = x->parent();
+                }
+            }
+        }
+        if (x)
+            x->setColor(Black);
+    }
+
+    Node* m_root;
+};
+
+}
+
+#endif
+
index c1ca13c..be94abd 100644 (file)
@@ -22,6 +22,7 @@ SOURCES += \
     wtf/HashTable.cpp \
     wtf/MD5.cpp \
     wtf/MainThread.cpp \
+    wtf/MetaAllocator.cpp \
     wtf/NullPtr.cpp \
     wtf/OSRandomSource.cpp \
     wtf/qt/MainThreadQt.cpp \
index 8c5372d..8747e5d 100644 (file)
@@ -2429,7 +2429,7 @@ public:
         backtrack();
 
         // Link & finalize the code.
-        LinkBuffer linkBuffer(*globalData, this, globalData->regexAllocator);
+        LinkBuffer linkBuffer(*globalData, this);
         m_backtrackingState.linkDataLabels(linkBuffer);
         jitObject.set(linkBuffer.finalizeCode());
         jitObject.setFallBack(m_shouldFallBack);
index 91c5b85..ba73dae 100644 (file)
@@ -65,11 +65,11 @@ public:
 
     int execute(const UChar* input, unsigned start, unsigned length, int* output)
     {
-        return reinterpret_cast<YarrJITCode>(m_ref.m_code.executableAddress())(input, start, length, output);
+        return reinterpret_cast<YarrJITCode>(m_ref.code().executableAddress())(input, start, length, output);
     }
 
 #if ENABLE(REGEXP_TRACING)
-    void *getAddr() { return m_ref.m_code.executableAddress(); }
+    void *getAddr() { return m_ref.code().executableAddress(); }
 #endif
 
 private:
index 184bb79..2567d47 100644 (file)
@@ -1,3 +1,20 @@
+2011-08-18  Filip Pizlo  <fpizlo@apple.com>
+
+        The executable allocator makes it difficult to free individual
+        chunks of executable memory
+        https://bugs.webkit.org/show_bug.cgi?id=66363
+
+        Reviewed by Oliver Hunt.
+        
+        Introduced a best-fit, balanced-tree based allocator. The allocator
+        required a balanced tree that does not allocate memory and that
+        permits the removal of individual nodes directly (as opposed to by
+        key); neither AVLTree nor WebCore's PODRedBlackTree supported this.
+        Changed all references to executable code to use a reference counted
+        handle.
+
+        * ForwardingHeaders/wtf/MetaAllocatorHandle.h: Added.
+
 2011-09-09  Mark Hahnenberg  <mhahnenberg@apple.com>
 
         Unzip initialization lists and constructors in JSCell hierarchy (5/7)
diff --git a/Source/JavaScriptGlue/ForwardingHeaders/wtf/MetaAllocatorHandle.h b/Source/JavaScriptGlue/ForwardingHeaders/wtf/MetaAllocatorHandle.h
new file mode 100644 (file)
index 0000000..a17b08e
--- /dev/null
@@ -0,0 +1 @@
+#include <JavaScriptCore/MetaAllocatorHandle.h>
index ecf9469..e29b17b 100755 (executable)
@@ -1,3 +1,25 @@
+2011-09-01  Filip Pizlo  <fpizlo@apple.com>
+
+        The executable allocator makes it difficult to free individual
+        chunks of executable memory
+        https://bugs.webkit.org/show_bug.cgi?id=66363
+
+        Reviewed by Oliver Hunt.
+        
+        Introduced a best-fit, balanced-tree based allocator. The allocator
+        required a balanced tree that does not allocate memory and that
+        permits the removal of individual nodes directly (as opposed to by
+        key); neither AVLTree nor WebCore's PODRedBlackTree supported this.
+        Changed all references to executable code to use a reference counted
+        handle.
+
+        No new layout tests because behavior is not changed.  New API unit
+        tests:
+        Tests/WTF/RedBlackTree.cpp
+        Tests/WTF/MetaAllocator.cpp
+
+        * ForwardingHeaders/wtf/MetaAllocatorHandle.h: Added.
+
 2011-09-10  Sam Weinig  <sam@webkit.org>
 
         Add isInterruptedExecutionException and isTerminatedExecutionException predicates
diff --git a/Source/WebCore/ForwardingHeaders/wtf/MetaAllocatorHandle.h b/Source/WebCore/ForwardingHeaders/wtf/MetaAllocatorHandle.h
new file mode 100644 (file)
index 0000000..a17b08e
--- /dev/null
@@ -0,0 +1 @@
+#include <JavaScriptCore/MetaAllocatorHandle.h>
index 6d9f8ac..e12af60 100644 (file)
@@ -1,3 +1,31 @@
+2011-09-01  Filip Pizlo  <fpizlo@apple.com>
+
+        The executable allocator makes it difficult to free individual
+        chunks of executable memory
+        https://bugs.webkit.org/show_bug.cgi?id=66363
+
+        Reviewed by Oliver Hunt.
+        
+        Introduced a best-fit, balanced-tree based allocator. The allocator
+        required a balanced tree that does not allocate memory and that
+        permits the removal of individual nodes directly (as opposed to by
+        key); neither AVLTree nor WebCore's PODRedBlackTree supported this.
+        Changed all references to executable code to use a reference counted
+        handle.
+
+        * TestWebKitAPI/TestWebKitAPI.xcodeproj/project.pbxproj:
+        * TestWebKitAPI/Tests/WTF/MetaAllocator.cpp: Added.
+        (TestWebKitAPI::TEST_F):
+        * TestWebKitAPI/Tests/WTF/RedBlackTree.cpp: Added.
+        (TestWebKitAPI::Pair::findExact):
+        (TestWebKitAPI::Pair::remove):
+        (TestWebKitAPI::Pair::findLeastGreaterThanOrEqual):
+        (TestWebKitAPI::Pair::assertFoundAndRemove):
+        (TestWebKitAPI::Pair::assertEqual):
+        (TestWebKitAPI::Pair::assertSameValuesForKey):
+        (TestWebKitAPI::Pair::testDriver):
+        (TestWebKitAPI::TEST_F):
+
 2011-09-10  Andy Estes  <aestes@apple.com>
 
         Move myself from committers_unable_to_review to reviewers_list.
index ced9e43..9eff04c 100644 (file)
@@ -7,6 +7,8 @@
        objects = {
 
 /* Begin PBXBuildFile section */
+               0FC6C4CC141027E0005B7F0C /* RedBlackTree.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FC6C4CB141027E0005B7F0C /* RedBlackTree.cpp */; };
+               0FC6C4CF141034AD005B7F0C /* MetaAllocator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FC6C4CE141034AD005B7F0C /* MetaAllocator.cpp */; };
                1A02C84F125D4A8400E3F4BD /* Find.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1A02C84E125D4A8400E3F4BD /* Find.cpp */; };
                1A02C870125D4CFD00E3F4BD /* find.html in Copy Resources */ = {isa = PBXBuildFile; fileRef = 1A02C84B125D4A5E00E3F4BD /* find.html */; };
                1A5FEFDD1270E2A3000E2921 /* EvaluateJavaScript.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1A5FEFDC1270E2A3000E2921 /* EvaluateJavaScript.cpp */; };
 /* End PBXCopyFilesBuildPhase section */
 
 /* Begin PBXFileReference section */
+               0FC6C4CB141027E0005B7F0C /* RedBlackTree.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = RedBlackTree.cpp; path = WTF/RedBlackTree.cpp; sourceTree = "<group>"; };
+               0FC6C4CE141034AD005B7F0C /* MetaAllocator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = MetaAllocator.cpp; path = WTF/MetaAllocator.cpp; sourceTree = "<group>"; };
                1A02C84B125D4A5E00E3F4BD /* find.html */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.html; path = find.html; sourceTree = "<group>"; };
                1A02C84E125D4A8400E3F4BD /* Find.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Find.cpp; sourceTree = "<group>"; };
                1A5FEFDC1270E2A3000E2921 /* EvaluateJavaScript.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = EvaluateJavaScript.cpp; sourceTree = "<group>"; };
                BC9096461255618900083756 /* WTF */ = {
                        isa = PBXGroup;
                        children = (
+                               0FC6C4CE141034AD005B7F0C /* MetaAllocator.cpp */,
+                               0FC6C4CB141027E0005B7F0C /* RedBlackTree.cpp */,
                                A7A966DA140ECCC8005EF9B4 /* CheckedArithmeticOperations.cpp */,
                                C01363C713C3997300EF3964 /* StringOperators.cpp */,
                                BC90964B125561BF00083756 /* VectorBasic.cpp */,
                                C08587FC13FEC39B001EF4E5 /* InstanceMethodSwizzler.mm in Sources */,
                                C085880013FEC3A6001EF4E5 /* InstanceMethodSwizzler.mm in Sources */,
                                37DC678D140D7C5000ABCCDB /* DOMRangeOfString.mm in Sources */,
+                               0FC6C4CC141027E0005B7F0C /* RedBlackTree.cpp in Sources */,
+                               0FC6C4CF141034AD005B7F0C /* MetaAllocator.cpp in Sources */,
                                A7A966DB140ECCC8005EF9B4 /* CheckedArithmeticOperations.cpp in Sources */,
                                939BA91714103412001A01BD /* DeviceScaleFactorOnBack.mm in Sources */,
                                3799AD3A14120A43005EB0C6 /* StringByEvaluatingJavaScriptFromString.mm in Sources */,
diff --git a/Tools/TestWebKitAPI/Tests/WTF/MetaAllocator.cpp b/Tools/TestWebKitAPI/Tests/WTF/MetaAllocator.cpp
new file mode 100644 (file)
index 0000000..a35f2f0
--- /dev/null
@@ -0,0 +1,951 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer. 
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution. 
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <JavaScriptCore/MetaAllocator.h>
+#include <JavaScriptCore/Vector.h>
+
+using namespace WTF;
+
+namespace TestWebKitAPI {
+
+class MetaAllocatorTest: public testing::Test {
+public:
+    enum SanityCheckMode { RunSanityCheck, DontRunSanityCheck };
+    
+    enum HeapGrowthMode { DontGrowHeap, ForTestDemandAllocCoalesce, ForTestDemandAllocDontCoalesce };
+    
+    HeapGrowthMode currentHeapGrowthMode;
+    size_t allowAllocatePages;
+    size_t requestedNumPages;
+
+    class SimpleTestAllocator: public MetaAllocator {
+    public:
+        SimpleTestAllocator(MetaAllocatorTest* parent)
+            : MetaAllocator(32)
+            , m_parent(parent)
+        {
+            addFreshFreeSpace(reinterpret_cast<void*>(basePage * pageSize()), defaultPagesInHeap * pageSize());
+        }
+        
+        virtual ~SimpleTestAllocator()
+        {
+            EXPECT_TRUE(!m_parent->allocatorDestroyed);
+            m_parent->allocatorDestroyed = true;
+        }
+        
+        virtual void* allocateNewSpace(size_t& numPages)
+        {
+            switch (m_parent->currentHeapGrowthMode) {
+            case DontGrowHeap:
+                return 0;
+                
+            case ForTestDemandAllocCoalesce:
+            case ForTestDemandAllocDontCoalesce: {
+                EXPECT_TRUE(m_parent->allowAllocatePages);
+                EXPECT_TRUE(m_parent->allowAllocatePages >= numPages);
+                m_parent->requestedNumPages = numPages;
+                numPages = m_parent->allowAllocatePages;
+                
+                unsigned offset;
+                if (m_parent->currentHeapGrowthMode == ForTestDemandAllocCoalesce)
+                    offset = 0;
+                else
+                    offset = 1;
+                
+                void* result = reinterpret_cast<void*>((basePage + defaultPagesInHeap + offset) * pageSize());
+                
+                m_parent->allowAllocatePages = 0;
+                m_parent->currentHeapGrowthMode = DontGrowHeap;
+                
+                for (size_t counter = 0; counter < numPages + offset; ++counter) {
+                    m_parent->pageMap->append(false);
+                    for (unsigned byteCounter = 0; byteCounter < pageSize(); ++byteCounter)
+                        m_parent->memoryMap->append(false);
+                }
+                
+                m_parent->additionalPagesInHeap += numPages;
+        
+                return result;
+            }
+                
+            default:
+                CRASH();
+                return 0;
+            }
+        }
+        
+        virtual void notifyNeedPage(void* page)
+        {
+            // the page should be both free and unmapped.
+            EXPECT_TRUE(!m_parent->pageState(reinterpret_cast<uintptr_t>(page) / pageSize()));
+            for (uintptr_t address = reinterpret_cast<uintptr_t>(page); address < reinterpret_cast<uintptr_t>(page) + pageSize(); ++address)
+                EXPECT_TRUE(!m_parent->byteState(reinterpret_cast<void*>(address)));
+            m_parent->pageState(reinterpret_cast<uintptr_t>(page) / pageSize()) = true;
+        }
+        
+        virtual void notifyPageIsFree(void* page)
+        {
+            // the page should be free of objects at this point, but it should still
+            // be mapped.
+            EXPECT_TRUE(m_parent->pageState(reinterpret_cast<uintptr_t>(page) / pageSize()));
+            for (uintptr_t address = reinterpret_cast<uintptr_t>(page); address < reinterpret_cast<uintptr_t>(page) + pageSize(); ++address)
+                EXPECT_TRUE(!m_parent->byteState(reinterpret_cast<void*>(address)));
+            m_parent->pageState(reinterpret_cast<uintptr_t>(page) / pageSize()) = false;
+        }
+        
+    private:
+        MetaAllocatorTest* m_parent;
+    };
+
+    static const unsigned basePage = 1;
+    static const unsigned defaultPagesInHeap = 100;
+    
+    unsigned additionalPagesInHeap;
+    
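+    // Shadow state for the simulated heap: memoryMap records which bytes are
+    // currently allocated and pageMap records which pages are committed.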
+    Vector<bool, 0>* memoryMap;
+    Vector<bool, 0>* pageMap;
+    bool allocatorDestroyed;
+    
+    SimpleTestAllocator* allocator;
+    
+    virtual void SetUp()
+    {
+        memoryMap = new Vector<bool, 0>();
+        pageMap = new Vector<bool, 0>();
+        
+        for (unsigned page = basePage; page < basePage + defaultPagesInHeap; ++page) {
+            pageMap->append(false);
+            for (unsigned byteInPage = 0; byteInPage < pageSize(); ++byteInPage)
+                memoryMap->append(false);
+        }
+
+        allocatorDestroyed = false;
+        
+        currentHeapGrowthMode = DontGrowHeap;
+        allowAllocatePages = 0;
+        additionalPagesInHeap = 0;
+        requestedNumPages = 0;
+        
+        allocator = new SimpleTestAllocator(this);
+    }
+    
+    virtual void TearDown()
+    {
+        EXPECT_TRUE(currentHeapGrowthMode == DontGrowHeap);
+        EXPECT_EQ(allowAllocatePages, static_cast<size_t>(0));
+        EXPECT_EQ(requestedNumPages, static_cast<size_t>(0));
+        
+        // memory should be free.
+        for (unsigned page = basePage; page < basePage + defaultPagesInHeap; ++page) {
+            EXPECT_TRUE(!pageState(page));
+            for (unsigned byteInPage = 0; byteInPage < pageSize(); ++byteInPage)
+                EXPECT_TRUE(!byteState(page * pageSize() + byteInPage));
+        }
+        
+        // NOTE: this automatically tests that the allocator did not leak
+        // memory, so long as these tests are running with !defined(NDEBUG).
+        // See MetaAllocator::m_mallocBalance.
+        delete allocator;
+        
+        EXPECT_TRUE(allocatorDestroyed);
+        
+        delete memoryMap;
+        delete pageMap;
+    }
+    
+    MetaAllocatorHandle* allocate(size_t sizeInBytes, SanityCheckMode sanityCheckMode = RunSanityCheck)
+    {
+        MetaAllocatorHandle* handle = allocator->allocate(sizeInBytes).leakRef();
+        EXPECT_TRUE(handle);
+        EXPECT_EQ(handle->sizeInBytes(), sizeInBytes);
+        
+        uintptr_t startByte = reinterpret_cast<uintptr_t>(handle->start());
+        uintptr_t endByte = startByte + sizeInBytes;
+        for (uintptr_t currentByte = startByte; currentByte < endByte; ++currentByte) {
+            EXPECT_TRUE(!byteState(currentByte));
+            byteState(currentByte) = true;
+            EXPECT_TRUE(pageState(currentByte / pageSize()));
+        }
+        
+        if (sanityCheckMode == RunSanityCheck)
+            sanityCheck();
+        
+        return handle;
+    }
+    
+    void free(MetaAllocatorHandle* handle, SanityCheckMode sanityCheckMode = RunSanityCheck)
+    {
+        EXPECT_TRUE(handle);
+        
+        notifyFree(handle->start(), handle->sizeInBytes());
+        handle->deref();
+        
+        if (sanityCheckMode == RunSanityCheck)
+            sanityCheck();
+    }
+    
+    void notifyFree(void* start, size_t sizeInBytes)
+    {
+        uintptr_t startByte = reinterpret_cast<uintptr_t>(start);
+        uintptr_t endByte = startByte + sizeInBytes;
+        for (uintptr_t currentByte = startByte; currentByte < endByte; ++currentByte) {
+            EXPECT_TRUE(byteState(currentByte));
+            byteState(currentByte) = false;
+        }
+    }
+    
+    void sanityCheck()
+    {
+#ifndef NDEBUG
+        EXPECT_EQ(allocator->bytesReserved() - allocator->bytesAllocated(), allocator->debugFreeSpaceSize());
+#endif
+        EXPECT_EQ(allocator->bytesReserved(), (defaultPagesInHeap + additionalPagesInHeap) * pageSize());
+        EXPECT_EQ(allocator->bytesAllocated(), bytesAllocated());
+        EXPECT_EQ(allocator->bytesCommitted(), bytesCommitted());
+    }
+    
+    void confirm(MetaAllocatorHandle* handle)
+    {
+        uintptr_t startByte = reinterpret_cast<uintptr_t>(handle->start());
+        confirm(startByte, startByte + handle->sizeInBytes(), true);
+    }
+    
+    void confirmHighWatermark(MetaAllocatorHandle* handle)
+    {
+        confirm(reinterpret_cast<uintptr_t>(handle->end()), (basePage + defaultPagesInHeap) * pageSize(), false);
+    }
+                
+    void confirm(uintptr_t startByte, uintptr_t endByte, bool value)
+    {
+        for (uintptr_t currentByte = startByte; currentByte < endByte; ++currentByte) {
+            EXPECT_EQ(byteState(currentByte), value);
+            if (value)
+                EXPECT_TRUE(pageState(currentByte / pageSize()));
+        }
+        if (!value) {
+            uintptr_t firstFreePage = (startByte + pageSize() - 1) / pageSize();
+            uintptr_t lastFreePage = (endByte - pageSize()) / pageSize();
+            for (uintptr_t currentPage = firstFreePage; currentPage <= lastFreePage; ++currentPage)
+                EXPECT_TRUE(!pageState(currentPage));
+        }
+    }
+    
+    size_t bytesAllocated()
+    {
+        size_t result = 0;
+        for (unsigned index = 0; index < memoryMap->size(); ++index) {
+            if (memoryMap->at(index))
+                result++;
+        }
+        return result;
+    }
+    
+    size_t bytesCommitted()
+    {
+        size_t result = 0;
+        for (unsigned index = 0; index < pageMap->size(); ++index) {
+            if (pageMap->at(index))
+                result++;
+        }
+        return result * pageSize();
+    }
+    
+    bool& byteState(void* address)
+    {
+        return byteState(reinterpret_cast<uintptr_t>(address));
+    }
+    
+    bool& byteState(uintptr_t address)
+    {
+        uintptr_t byteIndex = address - basePage * pageSize();
+        return memoryMap->at(byteIndex);
+    }
+    
+    bool& pageState(uintptr_t page)
+    {
+        uintptr_t pageIndex = page - basePage;
+        return pageMap->at(pageIndex);
+    }
+
+    // Test helpers
+
+    void testOneAlloc(size_t size)
+    {
+        // Tests the most basic behavior: allocate one thing and free it. Also
+        // verifies that the state of pages is correct.
+        
+        MetaAllocatorHandle* handle = allocate(size);
+        EXPECT_EQ(handle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(handle->sizeInBytes(), size);
+        EXPECT_TRUE(pageState(basePage));
+        
+        confirm(handle);
+        confirmHighWatermark(handle);
+        
+        free(handle);
+    }
+    
+    void testRepeatAllocFree(size_t firstSize, ...)
+    {
+        // Tests right-coalescing by repeatedly allocating and freeing. The idea
+        // is that if you allocate something and then free it, then the heap should
+        // look identical to what it was before the allocation due to a right-coalesce
+        // of the freed chunk and the already-free memory, and so subsequent
+        // allocations should behave the same as the first one.
+        
+        MetaAllocatorHandle* handle = allocate(firstSize);
+        EXPECT_EQ(handle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(handle->sizeInBytes(), firstSize);
+        
+        confirm(handle);
+        confirmHighWatermark(handle);
+        
+        free(handle);
+        
+        va_list argList;
+        va_start(argList, firstSize);
+        while (size_t sizeInBytes = va_arg(argList, int)) {
+            handle = allocate(sizeInBytes);
+            EXPECT_EQ(handle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+            EXPECT_EQ(handle->sizeInBytes(), sizeInBytes);
+            
+            confirm(handle);
+            confirmHighWatermark(handle);
+            
+            free(handle);
+        }
+        va_end(argList);
+    }
+    
+    void testSimpleFullCoalesce(size_t firstSize, size_t secondSize, size_t thirdSize)
+    {
+        // Allocates something of size firstSize, then something of size secondSize, and then
+        // frees the first allocation, and then the second, and then attempts to allocate the
+        // third, asserting that it allocated at the base address of the heap.
+        
+        // Note that if secondSize straddles a page boundary, the allocator may
+        // place it at the right end of the free chunk (right-allocation), which
+        // would cause this test to fail. Hence this test should only be run with
+        // a secondSize that never straddles a page.
+        
+        MetaAllocatorHandle* firstHandle = allocate(firstSize);
+        EXPECT_EQ(firstHandle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(firstHandle->sizeInBytes(), firstSize);
+        
+        confirm(firstHandle);
+        confirmHighWatermark(firstHandle);
+
+        MetaAllocatorHandle* secondHandle = allocate(secondSize);
+        EXPECT_EQ(secondHandle->start(), reinterpret_cast<void*>(basePage * pageSize() + firstSize));
+        EXPECT_EQ(secondHandle->sizeInBytes(), secondSize);
+        
+        confirm(firstHandle);
+        confirm(secondHandle);
+        confirmHighWatermark(secondHandle);
+        
+        free(firstHandle);
+        
+        confirm(secondHandle);
+        confirmHighWatermark(secondHandle);
+        
+        free(secondHandle);
+        
+        confirm(basePage * pageSize(), (basePage + defaultPagesInHeap) * pageSize(), false);
+        
+        MetaAllocatorHandle* thirdHandle = allocate(thirdSize);
+        EXPECT_EQ(thirdHandle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(thirdHandle->sizeInBytes(), thirdSize);
+        
+        confirm(thirdHandle);
+        confirmHighWatermark(thirdHandle);
+        
+        free(thirdHandle);
+    }
+    
+    enum TestFIFOAllocMode { FillAtEnd, EagerFill };
+    void testFIFOAlloc(TestFIFOAllocMode mode, ...)
+    {
+        // This will test the simple case of no-coalesce (freeing the left-most
+        // chunk in memory when the chunk to the right of it is allocated) and
+        // fully exercise left-coalescing and full-coalescing. In EagerFill
+        // mode, this also tests perfect-fit allocation and no-coalescing free.
+        
+        size_t totalSize = 0;
+        
+        Vector<MetaAllocatorHandle*, 0> handles;
+        
+        va_list argList;
+        va_start(argList, mode);
+        while (size_t sizeInBytes = va_arg(argList, int)) {
+            MetaAllocatorHandle* handle = allocate(sizeInBytes);
+            EXPECT_EQ(handle->start(), reinterpret_cast<void*>(basePage * pageSize() + totalSize));
+            EXPECT_EQ(handle->sizeInBytes(), sizeInBytes);
+            
+            confirm(handle);
+            confirmHighWatermark(handle);
+            
+            handles.append(handle);
+            totalSize += sizeInBytes;
+        }
+        va_end(argList);
+        
+        for (unsigned index = 0; index < handles.size(); ++index)
+            confirm(handles.at(index));
+        
+        size_t sizeSoFar = 0;
+        for (unsigned index = 0; index < handles.size(); ++index) {
+            sizeSoFar += handles.at(index)->sizeInBytes();
+            free(handles.at(index));
+            if (mode == EagerFill) {
+                MetaAllocatorHandle* handle = allocate(sizeSoFar);
+                EXPECT_EQ(handle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+                EXPECT_EQ(handle->sizeInBytes(), sizeSoFar);
+                
+                confirm(basePage * pageSize(), basePage * pageSize() + totalSize, true);
+                if (index < handles.size() - 1)
+                    confirmHighWatermark(handles.last());
+                else
+                    confirmHighWatermark(handle);
+                
+                free(handle);
+                
+                confirm(basePage * pageSize(), basePage * pageSize() + sizeSoFar, false);
+            }
+        }
+        
+        ASSERT(sizeSoFar == totalSize);
+        
+        confirm(basePage * pageSize(), (basePage + defaultPagesInHeap) * pageSize(), false);
+        
+        if (mode == FillAtEnd) {
+            MetaAllocatorHandle* finalHandle = allocate(totalSize);
+            EXPECT_EQ(finalHandle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+            EXPECT_EQ(finalHandle->sizeInBytes(), totalSize);
+            
+            confirm(finalHandle);
+            confirmHighWatermark(finalHandle);
+            
+            free(finalHandle);
+        }
+    }
+    
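+    // Fills the heap with allocations of the given size, verifies that one
+    // more allocation fails, and then frees everything.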
+    void testFillHeap(size_t sizeInBytes, size_t numAllocations)
+    {
+        Vector<MetaAllocatorHandle*, 0> handles;
+        
+        for (size_t index = 0; index < numAllocations; ++index)
+            handles.append(allocate(sizeInBytes, DontRunSanityCheck));
+        
+        sanityCheck();
+        
+        EXPECT_TRUE(!allocator->allocate(sizeInBytes));
+        
+        for (size_t index = 0; index < numAllocations; ++index)
+            free(handles.at(index), DontRunSanityCheck);
+        
+        sanityCheck();
+    }
+    
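+    // Exercises right-end placement: the two "right" allocations are expected
+    // to land against the end of the heap while the "left" ones stack up from
+    // the base, and freeing everything should leave one fully coalesced chunk.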
+    void testRightAllocation(size_t firstLeftSize, size_t firstRightSize, size_t secondLeftSize, size_t secondRightSize)
+    {
+        MetaAllocatorHandle* firstLeft = allocate(firstLeftSize);
+        EXPECT_EQ(firstLeft->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        
+        MetaAllocatorHandle* firstRight = allocate(firstRightSize);
+        EXPECT_EQ(firstRight->end(), reinterpret_cast<void*>((basePage + defaultPagesInHeap) * pageSize()));
+        
+        MetaAllocatorHandle* secondLeft = allocate(secondLeftSize);
+        EXPECT_EQ(secondLeft->start(), reinterpret_cast<void*>(basePage * pageSize() + firstLeft->sizeInBytes()));
+        
+        MetaAllocatorHandle* secondRight = allocate(secondRightSize);
+        EXPECT_EQ(secondRight->end(), reinterpret_cast<void*>((basePage + defaultPagesInHeap) * pageSize() - firstRight->sizeInBytes()));
+        
+        free(firstLeft);
+        free(firstRight);
+        free(secondLeft);
+        free(secondRight);
+        
+        MetaAllocatorHandle* final = allocate(defaultPagesInHeap * pageSize());
+        EXPECT_EQ(final->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        
+        free(final);
+    }
+    
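+    // Exercises best-fit allocation: carves out free slots of increasing size
+    // separated by small "fragger" allocations, frees the slots, and then
+    // checks that each subsequent request is satisfied from the freed slot
+    // that fits it best.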
+    void testBestFit(size_t firstSize, size_t step, unsigned numSlots, SanityCheckMode sanityCheckMode)
+    {
+        Vector<MetaAllocatorHandle*, 0> handlesToFree;
+        Vector<MetaAllocatorHandle*, 0> handles;
+        Vector<void*, 0> locations;
+        
+        size_t size = firstSize;
+        for (unsigned index = 0; index < numSlots; ++index) {
+            MetaAllocatorHandle* toFree = allocate(size, sanityCheckMode);
+            if (!handles.isEmpty()) {
+                while (toFree->start() != handles.last()->end()) {
+                    handlesToFree.append(toFree);
+                    toFree = allocate(size, sanityCheckMode);
+                }
+            }
+
+            MetaAllocatorHandle* fragger = allocate(32, sanityCheckMode);
+            EXPECT_EQ(fragger->start(), toFree->end());
+            
+            locations.append(toFree->start());
+
+            handlesToFree.append(toFree);
+            handles.append(fragger);
+            
+            size += step;
+        }
+        
+        ASSERT(locations.size() == numSlots);
+        
+        for (unsigned index = 0; index < handlesToFree.size(); ++index)
+            free(handlesToFree.at(index), sanityCheckMode);
+        
+        size = firstSize;
+        for (unsigned index = 0; index < numSlots; ++index) {
+            MetaAllocatorHandle* bestFit = allocate(size - 32, sanityCheckMode);
+            
+            EXPECT_TRUE(bestFit->start() == locations.at(index)
+                        || bestFit->end() == reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(locations.at(index)) + size));
+            
+            MetaAllocatorHandle* small = allocate(32, sanityCheckMode);
+            if (bestFit->start() == locations.at(index))
+                EXPECT_EQ(small->start(), bestFit->end());
+            else
+                EXPECT_EQ(small->end(), bestFit->start());
+            
+            free(bestFit, sanityCheckMode);
+            free(small, sanityCheckMode);
+            
+            size += step;
+        }
+        
+        for (unsigned index = 0; index < numSlots; ++index)
+            free(handles.at(index), sanityCheckMode);
+        
+        MetaAllocatorHandle* final = allocate(defaultPagesInHeap * pageSize(), sanityCheckMode);
+        EXPECT_EQ(final->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        
+        free(final, sanityCheckMode);
+    }
+    
+    void testShrink(size_t firstSize, size_t secondSize)
+    {
+        // Allocate the thing that will be shrunk
+        MetaAllocatorHandle* handle = allocate(firstSize);
+        
+        // Shrink it, and make sure that our state reflects the shrinkage.
+        notifyFree(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(handle->start()) + secondSize), firstSize - secondSize);
+        
+        handle->shrink(secondSize);
+        EXPECT_EQ(handle->sizeInBytes(), secondSize);
+        
+        sanityCheck();
+        
+        // Assert that the heap is not empty.
+        EXPECT_TRUE(!allocator->allocate(defaultPagesInHeap * pageSize()));
+        
+        // Allocate the remainder of the heap.
+        MetaAllocatorHandle* remainder = allocate(defaultPagesInHeap * pageSize() - secondSize);
+        EXPECT_EQ(remainder->start(), handle->end());
+        
+        free(remainder);
+        free(handle);
+        
+        // Assert that the heap is empty and finish up.
+        MetaAllocatorHandle* final = allocate(defaultPagesInHeap * pageSize());
+        EXPECT_EQ(final->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        
+        free(final);
+    }
+    
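+    // Exercises demand allocation where the pages returned by
+    // allocateNewSpace() are contiguous with the existing heap, so the new
+    // space coalesces with the free space that is already there.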
+    void testDemandAllocCoalesce(size_t firstSize, size_t numPages, size_t secondSize)
+    {
+        EXPECT_TRUE(!allocator->allocate((defaultPagesInHeap + numPages) * pageSize()));
+        
+        MetaAllocatorHandle* firstHandle = allocate(firstSize);
+        
+        EXPECT_TRUE(!allocator->allocate(secondSize));
+        EXPECT_TRUE(!allocator->allocate((defaultPagesInHeap + numPages) * pageSize()));
+        
+        currentHeapGrowthMode = ForTestDemandAllocCoalesce;
+        allowAllocatePages = numPages;
+        
+        MetaAllocatorHandle* secondHandle = allocate(secondSize);
+        
+        EXPECT_TRUE(currentHeapGrowthMode == DontGrowHeap);
+        EXPECT_EQ(allowAllocatePages, static_cast<size_t>(0));
+        EXPECT_EQ(requestedNumPages, (secondSize + pageSize() - 1) / pageSize());
+        EXPECT_EQ(secondHandle->start(), reinterpret_cast<void*>((basePage + defaultPagesInHeap) * pageSize()));
+        
+        requestedNumPages = 0;
+        
+        free(firstHandle);
+        free(secondHandle);
+        
+        free(allocate((defaultPagesInHeap + numPages) * pageSize()));
+    }
+    
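+    // Same as above, but allocateNewSpace() returns pages one page past the
+    // end of the existing heap, so the new space must not be coalesced with
+    // the old free space.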
+    void testDemandAllocDontCoalesce(size_t firstSize, size_t numPages, size_t secondSize)
+    {
+        free(allocate(firstSize));
+        free(allocate(defaultPagesInHeap * pageSize()));
+        EXPECT_TRUE(!allocator->allocate((defaultPagesInHeap + numPages) * pageSize()));
+        
+        MetaAllocatorHandle* firstHandle = allocate(firstSize);
+        
+        EXPECT_TRUE(!allocator->allocate(secondSize));
+        EXPECT_TRUE(!allocator->allocate((defaultPagesInHeap + numPages) * pageSize()));
+        
+        currentHeapGrowthMode = ForTestDemandAllocDontCoalesce;
+        allowAllocatePages = numPages;
+        
+        MetaAllocatorHandle* secondHandle = allocate(secondSize);
+        
+        EXPECT_TRUE(currentHeapGrowthMode == DontGrowHeap);
+        EXPECT_EQ(allowAllocatePages, static_cast<size_t>(0));
+        EXPECT_EQ(requestedNumPages, (secondSize + pageSize() - 1) / pageSize());
+        EXPECT_EQ(secondHandle->start(), reinterpret_cast<void*>((basePage + defaultPagesInHeap + 1) * pageSize()));
+        
+        requestedNumPages = 0;
+        
+        EXPECT_TRUE(!allocator->allocate((defaultPagesInHeap + numPages) * pageSize()));
+
+        free(firstHandle);
+        free(secondHandle);
+        
+        EXPECT_TRUE(!allocator->allocate((defaultPagesInHeap + numPages) * pageSize()));
+        
+        firstHandle = allocate(firstSize);
+        secondHandle = allocate(secondSize);
+        EXPECT_EQ(firstHandle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(secondHandle->start(), reinterpret_cast<void*>((basePage + defaultPagesInHeap + 1) * pageSize()));
+        free(firstHandle);
+        free(secondHandle);
+    }
+};
+
+TEST_F(MetaAllocatorTest, Empty)
+{
+    // Tests that creating and destroying an allocator works.
+}
+
+TEST_F(MetaAllocatorTest, AllocZero)    
+{
+    // Tests that allocating a zero-length block returns 0 and
+    // does not change anything in memory.
+    
+    ASSERT(!allocator->allocate(0));
+    
+    MetaAllocatorHandle* final = allocate(defaultPagesInHeap * pageSize());
+    EXPECT_EQ(final->start(), reinterpret_cast<void*>(basePage * pageSize()));
+    free(final);
+}
+
+TEST_F(MetaAllocatorTest, OneAlloc32)
+{
+    testOneAlloc(32);
+}
+
+TEST_F(MetaAllocatorTest, OneAlloc64)
+{
+    testOneAlloc(64);
+}
+
+TEST_F(MetaAllocatorTest, OneAllocTwoPages)
+{
+    testOneAlloc(pageSize() * 2);
+}
+
+TEST_F(MetaAllocatorTest, RepeatAllocFree32Twice)
+{
+    testRepeatAllocFree(32, 32, 0);
+}
+
+TEST_F(MetaAllocatorTest, RepeatAllocFree32Then64)
+{
+    testRepeatAllocFree(32, 64, 0);
+}
+
+TEST_F(MetaAllocatorTest, RepeatAllocFree64Then32)
+{
+    testRepeatAllocFree(64, 32, 0);
+}
+
+TEST_F(MetaAllocatorTest, RepeatAllocFree32TwiceThen64)
+{
+    testRepeatAllocFree(32, 32, 64, 0);
+}
+
+TEST_F(MetaAllocatorTest, RepeatAllocFree32Then64Twice)
+{
+    testRepeatAllocFree(32, 64, 64, 0);
+}
+
+TEST_F(MetaAllocatorTest, RepeatAllocFree64Then32Then64)
+{
+    testRepeatAllocFree(64, 32, 64, 0);
+}
+
+TEST_F(MetaAllocatorTest, RepeatAllocFree32Thrice)
+{
+    testRepeatAllocFree(32, 32, 32, 0);
+}
+
+TEST_F(MetaAllocatorTest, RepeatAllocFree32Then64Then32)
+{
+    testRepeatAllocFree(32, 64, 32, 0);
+}
+
+TEST_F(MetaAllocatorTest, RepeatAllocFree64Then32Twice)
+{
+    testRepeatAllocFree(64, 32, 32, 0);
+}
+
+TEST_F(MetaAllocatorTest, RepeatAllocFreeTwoPagesThen32)
+{
+    testRepeatAllocFree(static_cast<int>(pageSize() * 2), 32, 0);
+}
+
+TEST_F(MetaAllocatorTest, RepeatAllocFree32ThenTwoPages)
+{
+    testRepeatAllocFree(32, static_cast<int>(pageSize() * 2), 0);
+}
+
+TEST_F(MetaAllocatorTest, RepeatAllocFreePageThenTwoPages)
+{
+    testRepeatAllocFree(static_cast<int>(pageSize()), static_cast<int>(pageSize() * 2), 0);
+}
+
+TEST_F(MetaAllocatorTest, RepeatAllocFreeTwoPagesThenPage)
+{
+    testRepeatAllocFree(static_cast<int>(pageSize() * 2), static_cast<int>(pageSize()), 0);
+}
+
+TEST_F(MetaAllocatorTest, SimpleFullCoalesce32Plus32Then128)
+{
+    testSimpleFullCoalesce(32, 32, 128);
+}
+
+TEST_F(MetaAllocatorTest, SimpleFullCoalesce32Plus64Then128)
+{
+    testSimpleFullCoalesce(32, 64, 128);
+}
+
+TEST_F(MetaAllocatorTest, SimpleFullCoalesce64Plus32Then128)
+{
+    testSimpleFullCoalesce(64, 32, 128);
+}
+
+TEST_F(MetaAllocatorTest, SimpleFullCoalesce32PlusPageLess32ThenPage)
+{
+    testSimpleFullCoalesce(32, pageSize() - 32, pageSize());
+}
+
+TEST_F(MetaAllocatorTest, SimpleFullCoalesce32PlusPageLess32ThenTwoPages)
+{
+    testSimpleFullCoalesce(32, pageSize() - 32, pageSize() * 2);
+}
+
+TEST_F(MetaAllocatorTest, SimpleFullCoalescePagePlus32ThenTwoPages)
+{
+    testSimpleFullCoalesce(pageSize(), 32, pageSize() * 2);
+}
+
+TEST_F(MetaAllocatorTest, SimpleFullCoalescePagePlusPageThenTwoPages)
+{
+    testSimpleFullCoalesce(pageSize(), pageSize(), pageSize() * 2);
+}
+
+TEST_F(MetaAllocatorTest, FIFOAllocFillAtEnd32Twice)
+{
+    testFIFOAlloc(FillAtEnd, 32, 32, 0);
+}
+
+TEST_F(MetaAllocatorTest, FIFOAllocFillAtEnd32Thrice)
+{
+    testFIFOAlloc(FillAtEnd, 32, 32, 32, 0);
+}
+
+TEST_F(MetaAllocatorTest, FIFOAllocFillAtEnd32FourTimes)
+{
+    testFIFOAlloc(FillAtEnd, 32, 32, 32, 32, 0);
+}
+
+TEST_F(MetaAllocatorTest, FIFOAllocFillAtEndPageLess32Then32ThenPageLess64Then64)
+{
+    testFIFOAlloc(FillAtEnd, static_cast<int>(pageSize() - 32), 32, static_cast<int>(pageSize() - 64), 64, 0);
+}
+
+TEST_F(MetaAllocatorTest, FIFOAllocEagerFill32Twice)
+{
+    testFIFOAlloc(EagerFill, 32, 32, 0);
+}
+
+TEST_F(MetaAllocatorTest, FIFOAllocEagerFill32Thrice)
+{
+    testFIFOAlloc(EagerFill, 32, 32, 32, 0);
+}
+
+TEST_F(MetaAllocatorTest, FIFOAllocEagerFill32FourTimes)
+{
+    testFIFOAlloc(EagerFill, 32, 32, 32, 32, 0);
+}
+
+TEST_F(MetaAllocatorTest, FIFOAllocEagerFillPageLess32Then32ThenPageLess64Then64)
+{
+    testFIFOAlloc(EagerFill, static_cast<int>(pageSize() - 32), 32, static_cast<int>(pageSize() - 64), 64, 0);
+}
+
+TEST_F(MetaAllocatorTest, FillHeap32)
+{
+    testFillHeap(32, defaultPagesInHeap * pageSize() / 32);
+}
+
+TEST_F(MetaAllocatorTest, FillHeapPage)
+{
+    testFillHeap(pageSize(), defaultPagesInHeap);
+}
+
+TEST_F(MetaAllocatorTest, FillHeapTwoPages)
+{
+    testFillHeap(pageSize() * 2, defaultPagesInHeap / 2);
+}
+
+TEST_F(MetaAllocatorTest, RightAllocation32ThenPageThen32ThenPage)
+{
+    testRightAllocation(32, pageSize(), 32, pageSize());
+}
+
+TEST_F(MetaAllocatorTest, RightAllocationQuarterPageThenPageThenQuarterPageThenPage)
+{
+    testRightAllocation(pageSize() / 4, pageSize(), pageSize() / 4, pageSize());
+}
+
+TEST_F(MetaAllocatorTest, BestFit64Plus64Thrice)
+{
+    testBestFit(64, 64, 3, RunSanityCheck);
+}
+
+TEST_F(MetaAllocatorTest, BestFit64Plus64TenTimes)
+{
+    testBestFit(64, 64, 10, DontRunSanityCheck);
+}
+
+TEST_F(MetaAllocatorTest, BestFit64Plus64HundredTimes)
+{
+    testBestFit(64, 64, 100, DontRunSanityCheck);
+}
+
+TEST_F(MetaAllocatorTest, BestFit96Plus64Thrice)
+{
+    testBestFit(96, 64, 3, RunSanityCheck);
+}
+
+TEST_F(MetaAllocatorTest, BestFit96Plus64TenTimes)
+{
+    testBestFit(96, 64, 10, DontRunSanityCheck);
+}
+
+TEST_F(MetaAllocatorTest, BestFit96Plus64HundredTimes)
+{
+    testBestFit(96, 64, 100, DontRunSanityCheck);
+}
+
+TEST_F(MetaAllocatorTest, BestFit96Plus96Thrice)
+{
+    testBestFit(96, 96, 3, RunSanityCheck);
+}
+
+TEST_F(MetaAllocatorTest, BestFit96Plus96TenTimes)
+{
+    testBestFit(96, 96, 10, DontRunSanityCheck);
+}
+
+TEST_F(MetaAllocatorTest, BestFit96Plus96EightyTimes)
+{
+    testBestFit(96, 96, 80, DontRunSanityCheck);
+}
+
+TEST_F(MetaAllocatorTest, Shrink64To32)
+{
+    testShrink(64, 32);
+}
+
+TEST_F(MetaAllocatorTest, ShrinkPageTo32)
+{
+    testShrink(pageSize(), 32);
+}
+
+TEST_F(MetaAllocatorTest, ShrinkPageToPageLess32)
+{
+    testShrink(pageSize(), pageSize() - 32);
+}
+
+TEST_F(MetaAllocatorTest, ShrinkTwoPagesTo32)
+{
+    testShrink(pageSize() * 2, 32);
+}
+
+TEST_F(MetaAllocatorTest, ShrinkTwoPagesToPagePlus32)
+{
+    testShrink(pageSize() * 2, pageSize() + 32);
+}
+
+TEST_F(MetaAllocatorTest, ShrinkTwoPagesToPage)
+{
+    testShrink(pageSize() * 2, pageSize());
+}
+
+TEST_F(MetaAllocatorTest, ShrinkTwoPagesToPageLess32)
+{
+    testShrink(pageSize() * 2, pageSize() - 32);
+}
+
+TEST_F(MetaAllocatorTest, ShrinkTwoPagesToTwoPagesLess32)
+{
+    testShrink(pageSize() * 2, pageSize() * 2 - 32);
+}
+
+TEST_F(MetaAllocatorTest, DemandAllocCoalescePageThenDoubleHeap)
+{
+    testDemandAllocCoalesce(pageSize(), defaultPagesInHeap, defaultPagesInHeap * pageSize());
+}
+
+TEST_F(MetaAllocatorTest, DemandAllocCoalescePageThenTripleHeap)
+{
+    testDemandAllocCoalesce(pageSize(), defaultPagesInHeap * 2, defaultPagesInHeap * pageSize());
+}
+
+TEST_F(MetaAllocatorTest, DemandAllocDontCoalescePageThenDoubleHeap)
+{
+    testDemandAllocDontCoalesce(pageSize(), defaultPagesInHeap, defaultPagesInHeap * pageSize());
+}
+
+} // namespace TestWebKitAPI
diff --git a/Tools/TestWebKitAPI/Tests/WTF/RedBlackTree.cpp b/Tools/TestWebKitAPI/Tests/WTF/RedBlackTree.cpp
new file mode 100644 (file)
index 0000000..5a13bec
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer. 
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution. 
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <JavaScriptCore/RedBlackTree.h>
+#include <JavaScriptCore/Vector.h>
+
+using namespace WTF;
+
+namespace TestWebKitAPI {
+
+class RedBlackTreeTest: public testing::Test {
+public:
+    unsigned m_counter;
+    
+    virtual void SetUp()
+    {
+        m_counter = 0;
+    }
+    
+    virtual void TearDown()
+    {
+    }
+    
+    struct Pair {
+        char key;
+        unsigned value;
+        
+        Pair() { }
+        
+        Pair(char key, unsigned value)
+            : key(key)
+            , value(value)
+        {
+        }
+        
+        bool operator==(const Pair& other) const
+        {
+            return key == other.key;
+        }
+        
+        bool operator<(const Pair& other) const
+        {
+            return key < other.key;
+        }
+    };
+    
+    typedef Vector<Pair, 16> PairVector;
+    
+    PairVector findExact(PairVector& asVector, char key)
+    {
+        PairVector result;
+        
+        for (size_t index = 0; index < asVector.size(); ++index) {
+            if (asVector.at(index).key == key)
+                result.append(asVector.at(index));
+        }
+        
+        std::sort(result.begin(), result.end());
+        
+        return result;
+    }
+    
+    void remove(PairVector& asVector, size_t index)
+    {
+        asVector.at(index) = asVector.last();
+        asVector.removeLast();
+    }
+    
+    PairVector findLeastGreaterThanOrEqual(PairVector& asVector, char key)
+    {
+        char bestKey = 0; // assignment to make gcc happy
+        bool foundKey = false;
+        
+        for (size_t index = 0; index < asVector.size(); ++index) {
+            if (asVector.at(index).key >= key) {
+                if (asVector.at(index).key < bestKey || !foundKey) {
+                    foundKey = true;
+                    bestKey = asVector.at(index).key;
+                }
+            }
+        }
+        
+        PairVector result;
+        
+        if (!foundKey)
+            return result;
+        
+        return findExact(asVector, bestKey);
+    }
+    
+    void assertFoundAndRemove(PairVector& asVector, char key, unsigned value)
+    {
+        bool found = false;
+        size_t foundIndex = 0; // make compilers happy
+        
+        for (size_t index = 0; index < asVector.size(); ++index) {
+            if (asVector.at(index).key == key
+                && asVector.at(index).value == value) {
+                EXPECT_TRUE(!found);
+                
+                found = true;
+                foundIndex = index;
+            }
+        }
+        
+        EXPECT_TRUE(found);
+        
+        remove(asVector, foundIndex);
+    }
+    
+    // This deliberately passes a copy of the vector.
+    void assertEqual(RedBlackTree<char, unsigned>& asTree, PairVector asVector)
+    {
+        for (RedBlackTree<char, unsigned>::Node* current = asTree.first(); current; current = current->successor())
+            assertFoundAndRemove(asVector, current->m_key, current->m_value);
+    }
+    
+    void assertSameValuesForKey(RedBlackTree<char, unsigned>& asTree, RedBlackTree<char, unsigned>::Node* node, PairVector foundValues, char key)
+    {
+        if (node) {
+            EXPECT_EQ(node->m_key, key);
+            
+            RedBlackTree<char, unsigned>::Node* prevNode = node;
+            do {
+                node = prevNode;
+                prevNode = prevNode->predecessor();
+            } while (prevNode && prevNode->m_key == key);
+            
+            EXPECT_EQ(node->m_key, key);
+            EXPECT_TRUE(!prevNode || prevNode->m_key < key);
+            
+            do {
+                assertFoundAndRemove(foundValues, node->m_key, node->m_value);
+                
+                node = node->successor();
+                EXPECT_TRUE(!node || node->m_key >= key);
+            } while (node && node->m_key == key);
+        }
+        
+        EXPECT_TRUE(foundValues.isEmpty());
+    }
+    
+    // The control string is a null-terminated list of commands. Each
+    // command is two characters, with the first identifying the operation
+    // and the second giving a key. The commands are:
+    //  +x  Add x
+    //  *x  Find all elements equal to x
+    //  @x  Find all elements that have the smallest key that is greater than or equal to x
+    //  !x  Remove all elements equal to x
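+    //
+    // The string must therefore have even length. For example (illustrative only),
+    // "+a+a*a!a" adds two elements with key 'a', verifies that a find returns both,
+    // and then removes both.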
+    void testDriver(const char* controlString)
+    {
+        PairVector asVector;
+        RedBlackTree<char, unsigned> asTree;
+        
+        for (const char* current = controlString; *current; current += 2) {
+            char command = current[0];
+            char key = current[1];
+            unsigned value = ++m_counter;
+            
+            ASSERT(command);
+            ASSERT(key);
+            
+            switch (command) {
+            case '+': {
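+                // This test never deletes the nodes it allocates; the leak is
+                // harmless for a short-lived unit test.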
+                RedBlackTree<char, unsigned>::Node* node = new RedBlackTree<char, unsigned>::Node(key, value);
+                asTree.insert(node);
+                asVector.append(Pair(key, value));
+                break;
+            }
+                
+            case '*': {
+                RedBlackTree<char, unsigned>::Node* node = asTree.findExact(key);
+                if (node)
+                    EXPECT_EQ(node->m_key, key);
+                assertSameValuesForKey(asTree, node, findExact(asVector, key), key);
+                break;
+            }
+
+            case '@': {
+                RedBlackTree<char, unsigned>::Node* node = asTree.findLeastGreaterThanOrEqual(key);
+                if (node) {
+                    EXPECT_TRUE(node->m_key >= key);
+                    assertSameValuesForKey(asTree, node, findLeastGreaterThanOrEqual(asVector, key), node->m_key);
+                } else
+                    EXPECT_TRUE(findLeastGreaterThanOrEqual(asVector, key).isEmpty());
+                break;
+            }
+                
+            case '!': {
+                while (true) {
+                    RedBlackTree<char, unsigned>::Node* node = asTree.remove(key);
+                    if (node) {
+                        EXPECT_EQ(node->m_key, key);
+                        assertFoundAndRemove(asVector, node->m_key, node->m_value);
+                    } else {
+                        EXPECT_TRUE(findExact(asVector, key).isEmpty());
+                        break;
+                    }
+                }
+                break;
+            }
+                
+            default:
+                ASSERT_NOT_REACHED();
+                break;
+            }
+            
+            EXPECT_EQ(asTree.size(), asVector.size());
+            assertEqual(asTree, asVector);
+        }
+    }
+};
+
+TEST_F(RedBlackTreeTest, Empty)
+{
+    testDriver("");
+}
+
+TEST_F(RedBlackTreeTest, EmptyGetFindRemove)
+{
+    testDriver("*x@y!z");
+}
+
+TEST_F(RedBlackTreeTest, SingleAdd)
+{
+    testDriver("+a");
+}
+
+TEST_F(RedBlackTreeTest, SingleAddGetFindRemoveNotFound)
+{
+    testDriver("+a*x@y!z");
+}
+
+TEST_F(RedBlackTreeTest, SingleAddGetFindRemove)
+{
+    testDriver("+a*a@a!a");
+}
+
+TEST_F(RedBlackTreeTest, TwoAdds)
+{
+    testDriver("+a+b");
+}
+
+TEST_F(RedBlackTreeTest, ThreeAdds)
+{
+    testDriver("+a+b+c");
+}
+
+TEST_F(RedBlackTreeTest, FourAdds)
+{
+    testDriver("+a+b+c+d");
+}
+
+TEST_F(RedBlackTreeTest, LotsOfRepeatAdds)
+{
+    testDriver("+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d");
+}
+
+TEST_F(RedBlackTreeTest, LotsOfRepeatAndUniqueAdds)
+{
+    testDriver("+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+r+s+t+u+v+x+y+z");
+}
+
+TEST_F(RedBlackTreeTest, LotsOfRepeatAndUniqueAddsAndGetsAndRemoves)
+{
+    testDriver("+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+a+b+c+d+e+f+g+h+i+j+k+l+m+n+o+p+q+r+s+t+u+v+x+y+z*a*b*c*d*e*f*g*h*i*j*k*l*m*n*o*p*q*r*s*t*u*v*w*x*y*z!a!b!c!d!e!f!g!h!i!j!k!l!m!n!o!p!q!r!s!t!u!v!w!x!y!z");
+}
+
+TEST_F(RedBlackTreeTest, SimpleBestFitSearch)
+{
+    testDriver("+d+d+m+w@d@m@w@a@g@q");
+}
+
+TEST_F(RedBlackTreeTest, BiggerBestFitSearch)
+{
+    testDriver("+d+d+d+d+d+d+d+d+d+d+f+f+f+f+f+f+f+h+h+i+j+k+l+m+o+p+q+r+z@a@b@c@d@e@f@g@h@i@j@k@l@m@n@o@p@q@r@s@t@u@v@w@x@y@z");
+}
+
+} // namespace TestWebKitAPI