Unreviewed, rolling out r197955.
diff --git a/Source/bmalloc/bmalloc/Heap.cpp b/Source/bmalloc/bmalloc/Heap.cpp
index a13cbe1..c6cc510 100644
 #include "BumpAllocator.h"
 #include "LargeChunk.h"
 #include "LargeObject.h"
-#include "Line.h"
-#include "Page.h"
 #include "PerProcess.h"
 #include "SmallChunk.h"
+#include "SmallLine.h"
+#include "SmallPage.h"
 #include <thread>
 
 namespace bmalloc {
@@ -54,12 +54,12 @@ void Heap::initializeLineMetadata()
         size_t object = 0;
         size_t line = 0;
         while (object < vmPageSize) {
-            line = object / SmallPage::lineSize;
-            size_t leftover = object % SmallPage::lineSize;
+            line = object / smallLineSize;
+            size_t leftover = object % smallLineSize;
 
             size_t objectCount;
             size_t remainder;
-            divideRoundingUp(SmallPage::lineSize - leftover, size, objectCount, remainder);
+            divideRoundingUp(smallLineSize - leftover, size, objectCount, remainder);
 
             metadata[line] = { static_cast<unsigned short>(leftover), static_cast<unsigned short>(objectCount) };
 
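
For intuition, a small worked pass through the renamed computation (illustrative numbers only: smallLineSize = 256, object size = 96, with the loop advancing object by objectCount * size each iteration, as in the full function body):

    // object = 0:   line 0, leftover 0;  divideRoundingUp(256, 96) -> 3 objects
    //               start in line 0 (at 0, 96, 192); the last spills into line 1.
    // object = 288: line 1, leftover 32; divideRoundingUp(256 - 32, 96) -> 3 more
    //               objects start in line 1, measured from the 32-byte spill point.
    // ...and so on, until object >= vmPageSize.
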
@@ -86,13 +86,14 @@ void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::millisecon
 
     scavengeSmallPages(lock, sleepDuration);
     scavengeLargeObjects(lock, sleepDuration);
+    scavengeXLargeObjects(lock, sleepDuration);
 
     sleep(lock, sleepDuration);
 }
 
 void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
 {
-    while (m_smallPages.size()) {
+    while (!m_smallPages.isEmpty()) {
         m_vmHeap.deallocateSmallPage(lock, m_smallPages.pop());
         waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
     }
@@ -106,6 +107,22 @@ void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock, std::chrono
     }
 }
 
+void Heap::scavengeXLargeObjects(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
+{
+    while (XLargeRange range = m_xLargeMap.takePhysical()) {
+        lock.unlock();
+        vmDeallocatePhysicalPagesSloppy(range.begin(), range.size());
+        lock.lock();
+        
+        range.setVMState(VMState::Virtual);
+        m_xLargeMap.addVirtual(range);
+
+        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
+    }
+
+    m_xLargeMap.shrinkToFit();
+}
+
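
The unlock()/lock() pair above drops the heap lock across what is ultimately a syscall, then re-files the range as Virtual only once the lock is held again. As a rough sketch of what the "sloppy" release might do — assuming the usual convention of trimming an unaligned range inward to whole VM pages; the real helper lives in VMAllocate.h and may differ:

    // Illustrative sketch only, not the shipping implementation.
    #include <sys/mman.h>

    inline void vmDeallocatePhysicalPagesSloppy(void* object, size_t size)
    {
        uintptr_t begin = roundUpToMultipleOf(vmPageSize, reinterpret_cast<uintptr_t>(object));
        uintptr_t end = roundDownToMultipleOf(vmPageSize, reinterpret_cast<uintptr_t>(object) + size);
        if (begin >= end)
            return; // Nothing page-sized survives the trim.
        // Releases the physical pages but keeps the virtual reservation.
        // (Darwin would use MADV_FREE_REUSABLE; MADV_DONTNEED is the portable fallback.)
        madvise(reinterpret_cast<void*>(begin), end - begin, MADV_DONTNEED);
    }
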
 void Heap::allocateSmallBumpRanges(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpAllocator& allocator, BumpRangeCache& rangeCache)
 {
     BASSERT(!rangeCache.size());
@@ -114,7 +131,7 @@ void Heap::allocateSmallBumpRanges(std::lock_guard<StaticMutex>& lock, size_t si
     BASSERT(page->hasFreeLines(lock));
 
     // Find a free line.
-    for (size_t lineNumber = 0; lineNumber < SmallPage::lineCount; ++lineNumber) {
+    for (size_t lineNumber = 0; lineNumber < smallLineCount; ++lineNumber) {
         if (lines[lineNumber].refCount(lock))
             continue;
 
@@ -125,6 +142,7 @@ void Heap::allocateSmallBumpRanges(std::lock_guard<StaticMutex>& lock, size_t si
         // In a fragmented page, some free ranges might not fit in the cache.
         if (rangeCache.size() == rangeCache.capacity()) {
             m_smallPagesWithFreeLines[sizeClass].push(page);
+            BASSERT(allocator.canAllocate());
             return;
         }
 
@@ -134,7 +152,7 @@ void Heap::allocateSmallBumpRanges(std::lock_guard<StaticMutex>& lock, size_t si
         page->ref(lock);
 
         // Merge with subsequent free lines.
-        while (++lineNumber < SmallPage::lineCount) {
+        while (++lineNumber < smallLineCount) {
             if (lines[lineNumber].refCount(lock))
                 break;
 
@@ -153,21 +171,17 @@ void Heap::allocateSmallBumpRanges(std::lock_guard<StaticMutex>& lock, size_t si
             rangeCache.push({ begin, objectCount });
     }
 
+    BASSERT(allocator.canAllocate());
     page->setHasFreeLines(lock, false);
 }
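
The two new BASSERT(allocator.canAllocate()) calls pin down this function's postcondition: whether it returns early because the range cache is full or falls through after scanning every line, it must have refilled the allocator with at least one bump range. A hedged sketch of the caller contract this protects, with names assumed for illustration (the real fast/slow path lives in Allocator.cpp):

    // Illustrative caller shape, not the shipping code.
    void* allocateSmall(std::lock_guard<StaticMutex>& lock, size_t sizeClass,
        BumpAllocator& allocator, BumpRangeCache& rangeCache, Heap& heap)
    {
        if (!allocator.canAllocate()) {
            if (rangeCache.size())
                allocator.refill(rangeCache.pop()); // Cheap refill from cached ranges.
            else
                heap.allocateSmallBumpRanges(lock, sizeClass, allocator, rangeCache);
        }
        return allocator.allocate(); // Safe: the postcondition guarantees capacity.
    }
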
 
 SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
 {
-    Vector<SmallPage*>& smallPagesWithFreeLines = m_smallPagesWithFreeLines[sizeClass];
-    while (smallPagesWithFreeLines.size()) {
-        SmallPage* page = smallPagesWithFreeLines.pop();
-        if (!page->refCount(lock) || page->sizeClass() != sizeClass) // Page was promoted to the pages list.
-            continue;
-        return page;
-    }
+    if (!m_smallPagesWithFreeLines[sizeClass].isEmpty())
+        return m_smallPagesWithFreeLines[sizeClass].pop();
 
     SmallPage* page = [this, &lock]() {
-        if (m_smallPages.size())
+        if (!m_smallPages.isEmpty())
             return m_smallPages.pop();
 
         m_isAllocatingPages = true;
@@ -196,56 +210,11 @@ void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, SmallLine* li
     if (page->refCount(lock))
         return;
 
+    m_smallPagesWithFreeLines[page->sizeClass()].remove(page);
     m_smallPages.push(page);
     m_scavenger.run();
 }
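
This hunk trades the old lazy invalidation in allocateSmallPage() — pop entries and skip the stale ones — for eager removal: the moment a page's refcount reaches zero, it leaves its size class's free-lines list, so that list only ever holds valid candidates and the simplified pop above stays correct. A hypothetical simplification of the O(1) removal this relies on, assuming the lists are intrusive doubly linked lists in the style of bmalloc's List:

    // Hypothetical sketch; bmalloc's List.h may differ in detail.
    template<typename T>
    void List<T>::remove(T* node)
    {
        node->prev->next = node->next; // O(1) unlink; no scan of the list.
        node->next->prev = node->prev;
        node->prev = nullptr;
        node->next = nullptr;
    }
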
 
-void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size)
-{
-    void* result = tryAllocateXLarge(lock, alignment, size);
-    RELEASE_BASSERT(result);
-    return result;
-}
-
-void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t size)
-{
-    return allocateXLarge(lock, superChunkSize, size);
-}
-
-void* Heap::tryAllocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size)
-{
-    BASSERT(isPowerOfTwo(alignment));
-    BASSERT(alignment >= superChunkSize);
-    BASSERT(size == roundUpToMultipleOf<xLargeAlignment>(size));
-
-    void* result = tryVMAllocate(alignment, size);
-    if (!result)
-        return nullptr;
-    m_xLargeObjects.push(Range(result, size));
-    return result;
-}
-
-Range& Heap::findXLarge(std::unique_lock<StaticMutex>&, void* object)
-{
-    for (auto& range : m_xLargeObjects) {
-        if (range.begin() != object)
-            continue;
-        return range;
-    }
-
-    RELEASE_BASSERT(false);
-    return *static_cast<Range*>(nullptr); // Silence compiler error.
-}
-
-void Heap::deallocateXLarge(std::unique_lock<StaticMutex>& lock, void* object)
-{
-    Range toDeallocate = m_xLargeObjects.pop(&findXLarge(lock, object));
-
-    lock.unlock();
-    vmDeallocate(toDeallocate.begin(), toDeallocate.size());
-    lock.lock();
-}
-
 inline LargeObject& Heap::splitAndAllocate(LargeObject& largeObject, size_t size)
 {
     BASSERT(largeObject.isFree());
@@ -304,7 +273,7 @@ inline LargeObject& Heap::splitAndAllocate(LargeObject& largeObject, size_t alig
     return largeObject;
 }
 
-void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t size)
+void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t size)
 {
     BASSERT(size <= largeMax);
     BASSERT(size >= largeMin);
@@ -312,7 +281,7 @@ void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t size)
 
     LargeObject largeObject = m_largeObjects.take(size);
     if (!largeObject)
-        largeObject = m_vmHeap.allocateLargeObject(size);
+        largeObject = m_vmHeap.allocateLargeObject(lock, size);
 
     if (largeObject.vmState().hasVirtual()) {
         m_isAllocatingPages = true;
@@ -326,7 +295,7 @@ void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t size)
     return largeObject.begin();
 }
 
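
allocateLarge() now passes the caller's lock through to VMHeap::allocateLargeObject() (the aligned overload below gets the same treatment). Presumably the VMHeap signature gained the lock as a witness parameter — a common WebKit idiom in which the reference has no runtime use but makes it a type error to call the function without holding the heap lock. Assumed shape of the updated API:

    // Witness parameter: unnamed and unused, but required to be held by the caller.
    LargeObject allocateLargeObject(std::lock_guard<StaticMutex>&, size_t);
    LargeObject allocateLargeObject(std::lock_guard<StaticMutex>&, size_t alignment,
        size_t size, size_t unalignedSize);
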
-void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size, size_t unalignedSize)
+void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, size_t unalignedSize)
 {
     BASSERT(size <= largeMax);
     BASSERT(size >= largeMin);
@@ -340,7 +309,7 @@ void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_
 
     LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
     if (!largeObject)
-        largeObject = m_vmHeap.allocateLargeObject(alignment, size, unalignedSize);
+        largeObject = m_vmHeap.allocateLargeObject(lock, alignment, size, unalignedSize);
 
     if (largeObject.vmState().hasVirtual()) {
         m_isAllocatingPages = true;
@@ -370,4 +339,102 @@ void Heap::deallocateLarge(std::lock_guard<StaticMutex>& lock, void* object)
     deallocateLarge(lock, largeObject);
 }
 
+void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size)
+{
+    void* result = tryAllocateXLarge(lock, alignment, size);
+    RELEASE_BASSERT(result);
+    return result;
+}
+
+void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t size)
+{
+    return allocateXLarge(lock, xLargeAlignment, size);
+}
+
+XLargeRange Heap::splitAndAllocate(XLargeRange& range, size_t alignment, size_t size)
+{
+    XLargeRange prev;
+    XLargeRange next;
+
+    size_t alignmentMask = alignment - 1;
+    if (test(range.begin(), alignmentMask)) {
+        size_t prefixSize = roundUpToMultipleOf(alignment, range.begin()) - range.begin();
+        std::pair<XLargeRange, XLargeRange> pair = range.split(prefixSize);
+        prev = pair.first;
+        range = pair.second;
+    }
+
+    if (range.size() - size >= xLargeAlignment) {
+        size_t alignedSize = roundUpToMultipleOf<xLargeAlignment>(size);
+        std::pair<XLargeRange, XLargeRange> pair = range.split(alignedSize);
+        range = pair.first;
+        next = pair.second;
+    }
+
+    // At this point our range might contain an unused tail fragment. This is
+    // common. We can't allocate the tail fragment because it's aligned to less
+    // than xLargeAlignment. So, we pair the allocation with its tail fragment
+    // in the allocated list. This is an important optimization because it
+    // keeps the free list short, speeding up allocation and merging.
+
+    std::pair<XLargeRange, XLargeRange> allocated = range.split(roundUpToMultipleOf<vmPageSize>(size));
+    if (allocated.first.vmState().hasVirtual()) {
+        vmAllocatePhysicalPagesSloppy(allocated.first.begin(), allocated.first.size());
+        allocated.first.setVMState(VMState::Physical);
+    }
+
+    m_xLargeMap.addAllocated(prev, allocated, next);
+    return allocated.first;
+}
+
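
A worked example of the three-way split, with illustrative constants (say xLargeAlignment = 64 kB, vmPageSize = 16 kB; the real values come from Sizes.h). Suppose the free range is [0x10000, 0x90000) — 512 kB, already 64 kB-aligned — and the request is alignment = 64 kB, size = 80 kB:

    // 1. range.begin() is already 64 kB-aligned, so no prefix split; prev stays
    //    empty. (Had it been unaligned, the misaligned head would split into prev.)
    // 2. 512 kB - 80 kB >= 64 kB, so split at roundUpToMultipleOf<xLargeAlignment>(80 kB)
    //    = 128 kB: range = [0x10000, 0x30000), next = [0x30000, 0x90000).
    // 3. Split range at roundUpToMultipleOf<vmPageSize>(80 kB) = 80 kB:
    //    allocated.first  = [0x10000, 0x24000) -- 80 kB, returned to the caller;
    //    allocated.second = [0x24000, 0x30000) -- a 48 kB tail whose begin is not
    //    64 kB-aligned, so it stays paired with the allocation instead of
    //    lengthening the free list.
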
+void* Heap::tryAllocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size)
+{
+    BASSERT(isPowerOfTwo(alignment));
+    BASSERT(alignment < xLargeMax);
+
+    m_isAllocatingPages = true;
+
+    alignment = roundUpToMultipleOf<xLargeAlignment>(alignment);
+
+    XLargeRange range = m_xLargeMap.takeFree(alignment, size);
+    if (!range) {
+        // We allocate VM in aligned multiples to increase the chances that
+        // the OS will provide contiguous ranges that we can merge.
+        size_t alignedSize = roundUpToMultipleOf<xLargeAlignment>(size);
+
+        void* begin = tryVMAllocate(alignment, alignedSize);
+        if (!begin)
+            return nullptr;
+        range = XLargeRange(begin, alignedSize, VMState::Virtual);
+    }
+
+    return splitAndAllocate(range, alignment, size).begin();
+}
+
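
Rounding the fresh VM allocation up to a multiple of xLargeAlignment (rather than just vmPageSize) is what makes the merging the comment mentions likely: two such allocations that the OS happens to place back to back abut exactly on alignment boundaries. A minimal sketch of that opportunity, assuming XLargeMap::addFree() coalesces abutting ranges (the canMerge() helper below is hypothetical):

    // Hypothetical helper: two ranges can coalesce iff they abut.
    bool canMerge(const XLargeRange& a, const XLargeRange& b)
    {
        return a.end() == b.begin() || b.end() == a.begin();
    }

    void illustrateMerging()
    {
        size_t alignedSize = roundUpToMultipleOf<xLargeAlignment>(10 * MB);
        char* base = static_cast<char*>(tryVMAllocate(xLargeAlignment, 2 * alignedSize));
        BASSERT(base); // Illustration only; real code would handle failure.
        XLargeRange a(base, alignedSize, VMState::Virtual);
        XLargeRange b(base + alignedSize, alignedSize, VMState::Virtual);
        BASSERT(canMerge(a, b)); // a.end() == b.begin(): one contiguous free range.
    }
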
+size_t Heap::xLargeSize(std::unique_lock<StaticMutex>&, void* object)
+{
+    return m_xLargeMap.getAllocated(object).size();
+}
+
+void Heap::shrinkXLarge(std::unique_lock<StaticMutex>&, const Range& object, size_t newSize)
+{
+    BASSERT(object.size() > newSize);
+
+    if (object.size() - newSize < vmPageSize)
+        return;
+    
+    XLargeRange range = m_xLargeMap.takeAllocated(object.begin());
+    splitAndAllocate(range, xLargeAlignment, newSize);
+
+    m_scavenger.run();
+}
+
+void Heap::deallocateXLarge(std::unique_lock<StaticMutex>&, void* object)
+{
+    XLargeRange range = m_xLargeMap.takeAllocated(object);
+    m_xLargeMap.addFree(range);
+    
+    m_scavenger.run();
+}
+
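
Note that deallocateXLarge() no longer unmaps anything inline — contrast the deleted version above, which called vmDeallocate() under a dropped lock. Freeing is now a constant-time move into the free map plus a scavenger poke; the expensive work happens lazily. The resulting lifecycle, as this patch arranges it:

    // Lifecycle of an XLarge allocation's VM state under this patch:
    //   allocateXLarge()        -> range split out of the free map; pages committed (Physical).
    //   deallocateXLarge()      -> range moved back to the free map; pages still resident.
    //   scavengeXLargeObjects() -> pages madvised away; range re-filed as Virtual.
    //   a later allocation      -> splitAndAllocate() recommits the pages via
    //                              vmAllocatePhysicalPagesSloppy().
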
 } // namespace bmalloc