bmalloc scavenger should know what page classes are allocating
authormsaboff@apple.com <msaboff@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 28 Apr 2017 00:37:51 +0000 (00:37 +0000)
committermsaboff@apple.com <msaboff@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 28 Apr 2017 00:37:51 +0000 (00:37 +0000)
https://bugs.webkit.org/show_bug.cgi?id=171384

Reviewed by Geoffrey Garen.

This change replaces m_isAllocatingPages with a per page class flag to track which page
classes are currently allocating.  When scavenging, we skip page classes that are actively
allocating and come back to them on a subsequent pass.  This reduces the amount of time it
takes for the scavenger to free up pages as well as the total time it takes to handle all
page classes.

* bmalloc/Heap.cpp:
(bmalloc::Heap::Heap):
(bmalloc::Heap::concurrentScavenge):
(bmalloc::Heap::scavenge):
(bmalloc::Heap::scavengeSmallPages):
(bmalloc::Heap::scavengeLargeObjects):
(bmalloc::Heap::allocateSmallPage):
(bmalloc::Heap::splitAndAllocate):
(bmalloc::Heap::deallocateLarge):
* bmalloc/Heap.h:
(bmalloc::Heap::takeRequestedScavengerThreadQOSClass): Deleted.
* bmalloc/VMHeap.h:
(bmalloc::VMHeap::deallocateSmallPage):
* bmalloc/bmalloc.h:
(bmalloc::api::scavenge):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@215909 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Source/bmalloc/ChangeLog
Source/bmalloc/bmalloc/Heap.cpp
Source/bmalloc/bmalloc/Heap.h
Source/bmalloc/bmalloc/VMHeap.h
Source/bmalloc/bmalloc/bmalloc.h

index e335db1..e38b8b1 100644 (file)
@@ -1,3 +1,32 @@
+2017-04-27  Michael Saboff  <msaboff@apple.com>
+
+        bmalloc scavenger should know what page classes are allocating
+        https://bugs.webkit.org/show_bug.cgi?id=171384
+
+        Reviewed by Geoffrey Garen.
+
+        This change replaces m_isAllocatingPages with a per page class flag to track which page
+        classes are currently allocating.  When scavenging, we skip page classes that are actively
+        allocating and come back to them on a subsequent pass.  This reduces the amount of time it
+        takes for scavenger to free up pages as well as the total time it takes to handle all
+        page classes.
+
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::Heap):
+        (bmalloc::Heap::concurrentScavenge):
+        (bmalloc::Heap::scavenge):
+        (bmalloc::Heap::scavengeSmallPages):
+        (bmalloc::Heap::scavengeLargeObjects):
+        (bmalloc::Heap::allocateSmallPage):
+        (bmalloc::Heap::splitAndAllocate):
+        (bmalloc::Heap::deallocateLarge):
+        * bmalloc/Heap.h:
+        (bmalloc::Heap::takeRequestedScavengerThreadQOSClass): Deleted.
+        * bmalloc/VMHeap.h:
+        (bmalloc::VMHeap::deallocateSmallPage):
+        * bmalloc/bmalloc.h:
+        (bmalloc::api::scavenge):
+
 2017-04-25  Michael Saboff  <msaboff@apple.com>
 
         Call bmalloc scavenger first when handling a memory pressure event
index dced51b..80abe84 100644 (file)
@@ -40,7 +40,6 @@ namespace bmalloc {
 
 Heap::Heap(std::lock_guard<StaticMutex>&)
     : m_vmPageSizePhysical(vmPageSizePhysical())
-    , m_isAllocatingPages(false)
     , m_scavenger(*this, &Heap::concurrentScavenge)
     , m_debugHeap(nullptr)
 {
@@ -121,52 +120,63 @@ void Heap::initializePageMetadata()
 
 void Heap::concurrentScavenge()
 {
-    std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
-
 #if BOS(DARWIN)
-    if (auto requestedQOSClass = PerProcess<Heap>::getFastCase()->takeRequestedScavengerThreadQOSClass())
-        pthread_set_qos_class_self_np(requestedQOSClass, 0);
+    pthread_set_qos_class_self_np(m_requestedScavengerThreadQOSClass, 0);
 #endif
 
-    scavenge(lock, scavengeSleepDuration);
+    std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
+
+    scavenge(lock, Async);
 }
 
-void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
+void Heap::scavenge(std::unique_lock<StaticMutex>& lock, ScavengeMode scavengeMode)
 {
-    waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
+    m_isAllocatingPages.fill(false);
+    m_isAllocatingLargePages = false;
 
-    scavengeSmallPages(lock, sleepDuration);
-    scavengeLargeObjects(lock, sleepDuration);
+    if (scavengeMode == Async)
+        sleep(lock, scavengeSleepDuration);
 
-    sleep(lock, sleepDuration);
+    scavengeSmallPages(lock, scavengeMode);
+    scavengeLargeObjects(lock, scavengeMode);
 }
 
-void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
+void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, ScavengeMode scavengeMode)
 {
-    for (auto& smallPages : m_smallPages) {
+    for (size_t pageClass = 0; pageClass < pageClassCount; pageClass++) {
+        auto& smallPages = m_smallPages[pageClass];
+
         while (!smallPages.isEmpty()) {
+            if (m_isAllocatingPages[pageClass]) {
+                m_scavenger.run();
+                break;
+            }
+
             SmallPage* page = smallPages.pop();
-            size_t pageClass = m_pageClasses[page->sizeClass()];
-            m_vmHeap.deallocateSmallPage(lock, pageClass, page);
-            waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
+            m_vmHeap.deallocateSmallPage(lock, pageClass, page, scavengeMode);
         }
     }
 }
 
-void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
+void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock, ScavengeMode scavengeMode)
 {
     auto& ranges = m_largeFree.ranges();
     for (size_t i = ranges.size(); i-- > 0; i = std::min(i, ranges.size())) {
+        if (m_isAllocatingLargePages) {
+            m_scavenger.run();
+            break;
+        }
+
         auto range = ranges.pop(i);
 
-        lock.unlock();
+        if (scavengeMode == Async)
+            lock.unlock();
         vmDeallocatePhysicalPagesSloppy(range.begin(), range.size());
-        lock.lock();
+        if (scavengeMode == Async)
+            lock.lock();
 
         range.setPhysicalSize(0);
         ranges.push(range);
-
-        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
     }
 }
 
@@ -180,7 +190,7 @@ SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t si
         if (!m_smallPages[pageClass].isEmpty())
             return m_smallPages[pageClass].pop();
 
-        m_isAllocatingPages = true;
+        m_isAllocatingPages[pageClass] = true;
 
         SmallPage* page = m_vmHeap.allocateSmallPage(lock, pageClass);
         m_objectTypes.set(Chunk::get(page), ObjectType::Small);
@@ -349,7 +359,7 @@ LargeRange Heap::splitAndAllocate(LargeRange& range, size_t alignment, size_t si
     }
     
     if (range.physicalSize() < range.size()) {
-        m_isAllocatingPages = true;
+        m_isAllocatingLargePages = true;
 
         vmAllocatePhysicalPagesSloppy(range.begin() + range.physicalSize(), range.size() - range.physicalSize());
         range.setPhysicalSize(range.size());
index 3e4bdb8..63616a5 100644 (file)
@@ -66,10 +66,9 @@ public:
     size_t largeSize(std::lock_guard<StaticMutex>&, void*);
     void shrinkLarge(std::lock_guard<StaticMutex>&, const Range&, size_t);
 
-    void scavenge(std::unique_lock<StaticMutex>&, std::chrono::milliseconds sleepDuration);
+    void scavenge(std::unique_lock<StaticMutex>&, ScavengeMode);
 
 #if BOS(DARWIN)
-    qos_class_t takeRequestedScavengerThreadQOSClass() { return std::exchange(m_requestedScavengerThreadQOSClass, QOS_CLASS_UNSPECIFIED); }
     void setScavengerThreadQOSClass(qos_class_t overrideClass) { m_requestedScavengerThreadQOSClass = overrideClass; }
 #endif
 
@@ -103,8 +102,8 @@ private:
     LargeRange splitAndAllocate(LargeRange&, size_t alignment, size_t);
 
     void concurrentScavenge();
-    void scavengeSmallPages(std::unique_lock<StaticMutex>&, std::chrono::milliseconds);
-    void scavengeLargeObjects(std::unique_lock<StaticMutex>&, std::chrono::milliseconds);
+    void scavengeSmallPages(std::unique_lock<StaticMutex>&, ScavengeMode);
+    void scavengeLargeObjects(std::unique_lock<StaticMutex>&, ScavengeMode);
 
     size_t m_vmPageSizePhysical;
     Vector<LineMetadata> m_smallLineMetadata;
@@ -118,7 +117,9 @@ private:
 
     Map<Chunk*, ObjectType, ChunkHash> m_objectTypes;
 
-    bool m_isAllocatingPages;
+    std::array<bool, pageClassCount> m_isAllocatingPages;
+    bool m_isAllocatingLargePages;
+
     AsyncTask<Heap, decltype(&Heap::concurrentScavenge)> m_scavenger;
 
     Environment m_environment;
index b98704a..c54bbf5 100644 (file)
@@ -41,10 +41,12 @@ class BeginTag;
 class EndTag;
 class Heap;
 
+typedef enum { Sync, Async } ScavengeMode;
+
 class VMHeap {
 public:
     SmallPage* allocateSmallPage(std::lock_guard<StaticMutex>&, size_t);
-    void deallocateSmallPage(std::unique_lock<StaticMutex>&, size_t, SmallPage*);
+    void deallocateSmallPage(std::unique_lock<StaticMutex>&, size_t, SmallPage*, ScavengeMode);
 
     LargeRange tryAllocateLargeChunk(std::lock_guard<StaticMutex>&, size_t alignment, size_t);
     
@@ -68,11 +70,13 @@ inline SmallPage* VMHeap::allocateSmallPage(std::lock_guard<StaticMutex>& lock,
     return page;
 }
 
-inline void VMHeap::deallocateSmallPage(std::unique_lock<StaticMutex>& lock, size_t pageClass, SmallPage* page)
+inline void VMHeap::deallocateSmallPage(std::unique_lock<StaticMutex>& lock, size_t pageClass, SmallPage* page, ScavengeMode scavengeMode)
 {
-    lock.unlock();
+    if (scavengeMode == Async)
+        lock.unlock();
     vmDeallocatePhysicalPagesSloppy(page->begin()->begin(), pageSize(pageClass));
-    lock.lock();
+    if (scavengeMode == Async)
+        lock.lock();
     
     m_smallPages[pageClass].push(page);
 }
index b4e55c7..6d1f1e1 100644 (file)
@@ -76,7 +76,7 @@ inline void scavenge()
     scavengeThisThread();
 
     std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
-    PerProcess<Heap>::get()->scavenge(lock, std::chrono::milliseconds(0));
+    PerProcess<Heap>::get()->scavenge(lock, Sync);
 }
 
 inline bool isEnabled()