Use one Scavenger thread for all Heaps
author fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 6 Oct 2017 16:34:41 +0000 (16:34 +0000)
committer fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 6 Oct 2017 16:34:41 +0000 (16:34 +0000)
https://bugs.webkit.org/show_bug.cgi?id=174973

Reviewed by JF Bastien.

This combines the scavengers from all Heap instances into a single scavenger. It also combines
the accounting for deciding when to run. Each Heap still controls what it means to scavenge
itself (it's all in Heap::scavenge) but the policy decisions are all controlled by Scavenger.
Because Scavenger is also the only thing that needs an AsyncTask, this removes AsyncTask and
moves all of AsyncTask's logic into Scavenger.

This appears to be a 1% progression on JetStream (with high statistical confidence: p = 0.0049).

* bmalloc.xcodeproj/project.pbxproj:
* bmalloc/AsyncTask.h: Removed.
* bmalloc/Heap.cpp:
(bmalloc::Heap::Heap):
(bmalloc::Heap::allocateSmallChunk):
(bmalloc::Heap::allocateSmallPage):
(bmalloc::Heap::deallocateSmallLine):
(bmalloc::Heap::splitAndAllocate):
(bmalloc::Heap::tryAllocateLarge):
(bmalloc::Heap::shrinkLarge):
(bmalloc::Heap::deallocateLarge):
(bmalloc::Heap::concurrentScavenge): Deleted.
(bmalloc::Heap::scheduleScavengerIfUnderMemoryPressure): Deleted.
(bmalloc::Heap::scheduleScavenger): Deleted.
* bmalloc/Heap.h:
* bmalloc/Scavenger.cpp:
(bmalloc::Scavenger::Scavenger):
(bmalloc::Scavenger::run):
(bmalloc::Scavenger::runHoldingLock):
(bmalloc::Scavenger::runSoon):
(bmalloc::Scavenger::runSoonHoldingLock):
(bmalloc::Scavenger::didStartGrowing):
(bmalloc::Scavenger::scheduleIfUnderMemoryPressure):
(bmalloc::Scavenger::scheduleIfUnderMemoryPressureHoldingLock):
(bmalloc::Scavenger::schedule):
(bmalloc::Scavenger::threadEntryPoint):
(bmalloc::Scavenger::threadRunLoop):
(bmalloc::Scavenger::setSelfQOSClass):
* bmalloc/Scavenger.h:
(bmalloc::Scavenger::willRun):
(bmalloc::Scavenger::willRunSoon):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@222982 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Source/bmalloc/ChangeLog
Source/bmalloc/bmalloc.xcodeproj/project.pbxproj
Source/bmalloc/bmalloc/AsyncTask.h [deleted file]
Source/bmalloc/bmalloc/Heap.cpp
Source/bmalloc/bmalloc/Heap.h
Source/bmalloc/bmalloc/Scavenger.cpp
Source/bmalloc/bmalloc/Scavenger.h

index 59c4cf8..3fe2cd0 100644 (file)
@@ -1,3 +1,50 @@
+2017-10-05  Filip Pizlo  <fpizlo@apple.com>
+
+        Use one Scavenger thread for all Heaps
+        https://bugs.webkit.org/show_bug.cgi?id=174973
+
+        Reviewed by JF Bastien.
+        
+        This combines the scavengers from all Heap instances into a single scavenger. It also combines
+        the accounting for deciding when to run. Each Heap still controls what it means to scavenge
+        itself (it's all in Heap::scavenge) but the policy decisions are all controlled by Scavenger.
+        Because Scavenger is also the only thing that needs an AsyncTask, this removes AsyncTask and
+        moves all of AsyncTask's logic into Scavenger.
+        
+        This appears to be a 1% progression on JetStream (with high statistical confidence: p = 0.0049).
+
+        * bmalloc.xcodeproj/project.pbxproj:
+        * bmalloc/AsyncTask.h: Removed.
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::Heap):
+        (bmalloc::Heap::allocateSmallChunk):
+        (bmalloc::Heap::allocateSmallPage):
+        (bmalloc::Heap::deallocateSmallLine):
+        (bmalloc::Heap::splitAndAllocate):
+        (bmalloc::Heap::tryAllocateLarge):
+        (bmalloc::Heap::shrinkLarge):
+        (bmalloc::Heap::deallocateLarge):
+        (bmalloc::Heap::concurrentScavenge): Deleted.
+        (bmalloc::Heap::scheduleScavengerIfUnderMemoryPressure): Deleted.
+        (bmalloc::Heap::scheduleScavenger): Deleted.
+        * bmalloc/Heap.h:
+        * bmalloc/Scavenger.cpp:
+        (bmalloc::Scavenger::Scavenger):
+        (bmalloc::Scavenger::run):
+        (bmalloc::Scavenger::runHoldingLock):
+        (bmalloc::Scavenger::runSoon):
+        (bmalloc::Scavenger::runSoonHoldingLock):
+        (bmalloc::Scavenger::didStartGrowing):
+        (bmalloc::Scavenger::scheduleIfUnderMemoryPressure):
+        (bmalloc::Scavenger::scheduleIfUnderMemoryPressureHoldingLock):
+        (bmalloc::Scavenger::schedule):
+        (bmalloc::Scavenger::threadEntryPoint):
+        (bmalloc::Scavenger::threadRunLoop):
+        (bmalloc::Scavenger::setSelfQOSClass):
+        * bmalloc/Scavenger.h:
+        (bmalloc::Scavenger::willRun):
+        (bmalloc::Scavenger::willRunSoon):
+
 2017-10-04  Filip Pizlo  <fpizlo@apple.com>
 
         bmalloc mutex should be adaptive
index 12ced24..c90760a 100644 (file)
@@ -48,7 +48,6 @@
                14DD78BC18F48D6B00950702 /* SmallLine.h in Headers */ = {isa = PBXBuildFile; fileRef = 1452478618BC757C00F80098 /* SmallLine.h */; settings = {ATTRIBUTES = (Private, ); }; };
                14DD78BD18F48D6B00950702 /* SmallPage.h in Headers */ = {isa = PBXBuildFile; fileRef = 143E29ED18CAE90500FE8A0F /* SmallPage.h */; settings = {ATTRIBUTES = (Private, ); }; };
                14DD78C518F48D7500950702 /* Algorithm.h in Headers */ = {isa = PBXBuildFile; fileRef = 1421A87718EE462A00B4DD68 /* Algorithm.h */; settings = {ATTRIBUTES = (Private, ); }; };
-               14DD78C618F48D7500950702 /* AsyncTask.h in Headers */ = {isa = PBXBuildFile; fileRef = 1417F65218BA88A00076FA3F /* AsyncTask.h */; settings = {ATTRIBUTES = (Private, ); }; };
                14DD78C718F48D7500950702 /* BAssert.h in Headers */ = {isa = PBXBuildFile; fileRef = 1413E468189EEDE400546D68 /* BAssert.h */; settings = {ATTRIBUTES = (Private, ); }; };
                14DD78C818F48D7500950702 /* FixedVector.h in Headers */ = {isa = PBXBuildFile; fileRef = 14D9DB4517F2447100EAAB79 /* FixedVector.h */; settings = {ATTRIBUTES = (Private, ); }; };
                14DD78C918F48D7500950702 /* BInline.h in Headers */ = {isa = PBXBuildFile; fileRef = 1413E460189DCE1E00546D68 /* BInline.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -98,7 +97,6 @@
                1413E462189DE1CD00546D68 /* BumpAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; lineEnding = 0; name = BumpAllocator.h; path = bmalloc/BumpAllocator.h; sourceTree = "<group>"; };
                1413E468189EEDE400546D68 /* BAssert.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = BAssert.h; path = bmalloc/BAssert.h; sourceTree = "<group>"; };
                1417F64F18B7280C0076FA3F /* Syscall.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Syscall.h; path = bmalloc/Syscall.h; sourceTree = "<group>"; };
-               1417F65218BA88A00076FA3F /* AsyncTask.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = AsyncTask.h; path = bmalloc/AsyncTask.h; sourceTree = "<group>"; };
                141D9AFF1C8E51C0000ABBA0 /* List.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = List.h; path = bmalloc/List.h; sourceTree = "<group>"; };
                1421A87718EE462A00B4DD68 /* Algorithm.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = Algorithm.h; path = bmalloc/Algorithm.h; sourceTree = "<group>"; };
                142B44341E2839E7001DA6E9 /* DebugHeap.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DebugHeap.cpp; path = bmalloc/DebugHeap.cpp; sourceTree = "<group>"; };
                        children = (
                                4408F2961C9896C40012EC64 /* darwin */,
                                1421A87718EE462A00B4DD68 /* Algorithm.h */,
-                               1417F65218BA88A00076FA3F /* AsyncTask.h */,
                                6599C5CA1EC3F15900A2F7BB /* AvailableMemory.cpp */,
                                6599C5CB1EC3F15900A2F7BB /* AvailableMemory.h */,
                                1413E468189EEDE400546D68 /* BAssert.h */,
                                14DD789818F48D4A00950702 /* Allocator.h in Headers */,
                                0F5BF1531F22E1570029D91D /* Scavenger.h in Headers */,
                                0F5BF1471F22A8B10029D91D /* HeapKind.h in Headers */,
-                               14DD78C618F48D7500950702 /* AsyncTask.h in Headers */,
                                6599C5CD1EC3F15900A2F7BB /* AvailableMemory.h in Headers */,
                                14DD78C718F48D7500950702 /* BAssert.h in Headers */,
                                1448C30118F3754C00502839 /* bmalloc.h in Headers */,
diff --git a/Source/bmalloc/bmalloc/AsyncTask.h b/Source/bmalloc/bmalloc/AsyncTask.h
deleted file mode 100644 (file)
index c6c0649..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#ifndef AsyncTask_h
-#define AsyncTask_h
-
-#include "BAssert.h"
-#include "BInline.h"
-#include "Mutex.h"
-#include "Sizes.h"
-#include <atomic>
-#include <condition_variable>
-#include <thread>
-
-namespace bmalloc {
-
-template<typename Object, typename Function>
-class AsyncTask {
-public:
-    AsyncTask(Object&, const Function&);
-    ~AsyncTask();
-    
-    bool willRun() { return m_state == State::Run; }
-    void run();
-    
-    bool willRunSoon() { return m_state > State::Sleep; }
-    void runSoon();
-    
-private:
-    enum class State { Sleep, Run, RunSoon };
-    
-    void runSlowCase();
-    void runSoonSlowCase();
-    
-    static void threadEntryPoint(AsyncTask*);
-    void threadRunLoop();
-
-    std::atomic<State> m_state;
-
-    Mutex m_conditionMutex;
-    std::condition_variable_any m_condition;
-
-    std::thread m_thread;
-
-    Object& m_object;
-    Function m_function;
-};
-
-template<typename Object, typename Function>
-AsyncTask<Object, Function>::AsyncTask(Object& object, const Function& function)
-    : m_state(State::Sleep)
-    , m_condition()
-    , m_thread(std::thread(&AsyncTask::threadEntryPoint, this))
-    , m_object(object)
-    , m_function(function)
-{
-}
-
-template<typename Object, typename Function>
-AsyncTask<Object, Function>::~AsyncTask()
-{
-    // We'd like to mark our destructor deleted but C++ won't allow it because
-    // we are an automatic member of Heap.
-    RELEASE_BASSERT(0);
-}
-
-template<typename Object, typename Function>
-void AsyncTask<Object, Function>::run()
-{
-    m_state = State::Run;
-    
-    std::lock_guard<Mutex> lock(m_conditionMutex);
-    m_condition.notify_all();
-}
-    
-template<typename Object, typename Function>
-void AsyncTask<Object, Function>::runSoon()
-{
-    m_state = State::RunSoon;
-    
-    std::lock_guard<Mutex> lock(m_conditionMutex);
-    m_condition.notify_all();
-}
-
-template<typename Object, typename Function>
-void AsyncTask<Object, Function>::threadEntryPoint(AsyncTask* asyncTask)
-{
-#if BOS(DARWIN)
-    pthread_set_qos_class_self_np(QOS_CLASS_USER_INTERACTIVE, 0);
-#endif
-
-    asyncTask->threadRunLoop();
-}
-
-template<typename Object, typename Function>
-void AsyncTask<Object, Function>::threadRunLoop()
-{
-    // This loop ratchets downward from most active to least active state. While
-    // we ratchet downward, any other thread may reset our state.
-    
-    // We require any state change while we are sleeping to signal to our
-    // condition variable and wake us up.
-    
-    while (1) {
-        if (m_state == State::Sleep) {
-            std::unique_lock<Mutex> lock(m_conditionMutex);
-            m_condition.wait(lock, [&]() { return m_state != State::Sleep; });
-        }
-        
-        if (m_state == State::RunSoon) {
-            std::unique_lock<Mutex> lock(m_conditionMutex);
-            m_condition.wait_for(lock, asyncTaskSleepDuration, [&]() { return m_state != State::RunSoon; });
-        }
-        
-        m_state = State::Sleep;
-        (m_object.*m_function)();
-    }
-}
-
-} // namespace bmalloc
-
-#endif // AsyncTask_h
index 9dcf40a..1c1710c 100644 (file)
@@ -44,7 +44,6 @@ namespace bmalloc {
 Heap::Heap(HeapKind kind, std::lock_guard<StaticMutex>&)
     : m_kind(kind)
     , m_vmPageSizePhysical(vmPageSizePhysical())
-    , m_scavenger(*this, &Heap::concurrentScavenge)
     , m_debugHeap(nullptr)
 {
     RELEASE_BASSERT(vmPageSizePhysical() >= smallPageSize);
@@ -65,7 +64,7 @@ Heap::Heap(HeapKind kind, std::lock_guard<StaticMutex>&)
 #endif
     }
     
-    PerProcess<Scavenger>::get();
+    m_scavenger = PerProcess<Scavenger>::get();
 }
 
 bool Heap::usingGigacage()
@@ -139,23 +138,6 @@ void Heap::initializePageMetadata()
         m_pageClasses[i] = (computePageSize(i) - 1) / smallPageSize;
 }
 
-void Heap::concurrentScavenge()
-{
-    std::lock_guard<StaticMutex> lock(mutex());
-
-#if BOS(DARWIN)
-    pthread_set_qos_class_self_np(PerProcess<Scavenger>::getFastCase()->requestedScavengerThreadQOSClass(), 0);
-#endif
-
-    if (m_isGrowing && !isUnderMemoryPressure()) {
-        m_isGrowing = false;
-        m_scavenger.runSoon();
-        return;
-    }
-    
-    scavenge(lock);
-}
-
 void Heap::scavenge(std::lock_guard<StaticMutex>&)
 {
     for (auto& list : m_freePages) {
@@ -183,35 +165,6 @@ void Heap::scavenge(std::lock_guard<StaticMutex>&)
     }
 }
 
-void Heap::scheduleScavengerIfUnderMemoryPressure(size_t bytes)
-{
-    m_scavengerBytes += bytes;
-    if (m_scavengerBytes < scavengerBytesPerMemoryPressureCheck)
-        return;
-
-    m_scavengerBytes = 0;
-
-    if (m_scavenger.willRun())
-        return;
-
-    if (!isUnderMemoryPressure())
-        return;
-
-    m_isGrowing = false;
-    m_scavenger.run();
-}
-
-void Heap::scheduleScavenger(size_t bytes)
-{
-    scheduleScavengerIfUnderMemoryPressure(bytes);
-
-    if (m_scavenger.willRunSoon())
-        return;
-
-    m_isGrowing = false;
-    m_scavenger.runSoon();
-}
-
 void Heap::deallocateLineCache(std::lock_guard<StaticMutex>&, LineCache& lineCache)
 {
     for (auto& list : lineCache) {
@@ -242,7 +195,7 @@ void Heap::allocateSmallChunk(std::lock_guard<StaticMutex>& lock, size_t pageCla
             chunk->freePages().push(page);
         });
         
-        scheduleScavenger(0);
+        m_scavenger->schedule(0);
 
         return chunk;
     }();
@@ -274,7 +227,7 @@ SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t si
     if (!m_lineCache[sizeClass].isEmpty())
         return m_lineCache[sizeClass].popFront();
 
-    m_isGrowing = true;
+    m_scavenger->didStartGrowing();
     
     SmallPage* page = [&]() {
         size_t pageClass = m_pageClasses[sizeClass];
@@ -291,7 +244,7 @@ SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t si
             m_freePages[pageClass].remove(chunk);
 
         if (!page->hasPhysicalPages()) {
-            scheduleScavengerIfUnderMemoryPressure(pageSize(pageClass));
+            m_scavenger->scheduleIfUnderMemoryPressure(pageSize(pageClass));
 
             vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize(pageClass));
             page->setHasPhysicalPages(true);
@@ -339,7 +292,7 @@ void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, Object object
         m_chunkCache[pageClass].push(chunk);
     }
     
-    scheduleScavenger(pageSize(pageClass));
+    m_scavenger->schedule(pageSize(pageClass));
 }
 
 void Heap::allocateSmallBumpRangesByMetadata(
@@ -486,7 +439,7 @@ LargeRange Heap::splitAndAllocate(LargeRange& range, size_t alignment, size_t si
         
     case AllocationKind::Physical:
         if (range.physicalSize() < range.size()) {
-            scheduleScavengerIfUnderMemoryPressure(range.size());
+            m_scavenger->scheduleIfUnderMemoryPressure(range.size());
             
             vmAllocatePhysicalPagesSloppy(range.begin() + range.physicalSize(), range.size() - range.physicalSize());
             range.setPhysicalSize(range.size());
@@ -513,7 +466,7 @@ void* Heap::tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, si
     if (m_debugHeap)
         return m_debugHeap->memalignLarge(alignment, size, allocationKind);
     
-    m_isGrowing = true;
+    m_scavenger->didStartGrowing();
     
     size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
     if (roundedSize < size) // Check for overflow
@@ -567,7 +520,7 @@ void Heap::shrinkLarge(std::lock_guard<StaticMutex>&, const Range& object, size_
     LargeRange range = LargeRange(object, size);
     splitAndAllocate(range, alignment, newSize, AllocationKind::Physical);
 
-    scheduleScavenger(size);
+    m_scavenger->schedule(size);
 }
 
 void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object, AllocationKind allocationKind)
@@ -577,7 +530,7 @@ void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object, Allocati
 
     size_t size = m_largeAllocated.remove(object);
     m_largeFree.add(LargeRange(object, size, allocationKind == AllocationKind::Physical ? size : 0));
-    scheduleScavenger(size);
+    m_scavenger->schedule(size);
 }
 
 } // namespace bmalloc
index 533b93d..39fcc73 100644 (file)
@@ -27,7 +27,6 @@
 #define Heap_h
 
 #include "AllocationKind.h"
-#include "AsyncTask.h"
 #include "BumpRange.h"
 #include "Chunk.h"
 #include "HeapKind.h"
@@ -51,6 +50,7 @@ class BeginTag;
 class BumpAllocator;
 class DebugHeap;
 class EndTag;
+class Scavenger;
 
 class Heap {
 public:
@@ -112,11 +112,6 @@ private:
 
     LargeRange splitAndAllocate(LargeRange&, size_t alignment, size_t, AllocationKind);
 
-    void scheduleScavenger(size_t);
-    void scheduleScavengerIfUnderMemoryPressure(size_t);
-    
-    void concurrentScavenge();
-    
     HeapKind m_kind;
     
     size_t m_vmPageSizePhysical;
@@ -132,12 +127,8 @@ private:
 
     Map<Chunk*, ObjectType, ChunkHash> m_objectTypes;
 
-    size_t m_scavengerBytes { 0 };
-    bool m_isGrowing { false };
-    
-    AsyncTask<Heap, decltype(&Heap::concurrentScavenge)> m_scavenger;
-
-    DebugHeap* m_debugHeap;
+    Scavenger* m_scavenger { nullptr };
+    DebugHeap* m_debugHeap { nullptr };
 };
 
 inline void Heap::allocateSmallBumpRanges(
index 6b64c27..ac2a668 100644 (file)
@@ -25,6 +25,7 @@
 
 #include "Scavenger.h"
 
+#include "AvailableMemory.h"
 #include "Heap.h"
 #include <thread>
 
@@ -41,6 +42,76 @@ Scavenger::Scavenger(std::lock_guard<StaticMutex>&)
     dispatch_resume(m_pressureHandlerDispatchSource);
     dispatch_release(queue);
 #endif
+    
+    m_thread = std::thread(&threadEntryPoint, this);
+}
+
+void Scavenger::run()
+{
+    std::lock_guard<Mutex> lock(m_mutex);
+    runHoldingLock();
+}
+
+void Scavenger::runHoldingLock()
+{
+    m_state = State::Run;
+    m_condition.notify_all();
+}
+
+void Scavenger::runSoon()
+{
+    std::lock_guard<Mutex> lock(m_mutex);
+    runSoonHoldingLock();
+}
+
+void Scavenger::runSoonHoldingLock()
+{
+    if (willRunSoon())
+        return;
+    m_state = State::RunSoon;
+    m_condition.notify_all();
+}
+
+void Scavenger::didStartGrowing()
+{
+    // We don't really need to lock here, since this is just a heuristic.
+    m_isProbablyGrowing = true;
+}
+
+void Scavenger::scheduleIfUnderMemoryPressure(size_t bytes)
+{
+    std::lock_guard<Mutex> lock(m_mutex);
+    scheduleIfUnderMemoryPressureHoldingLock(bytes);
+}
+
+void Scavenger::scheduleIfUnderMemoryPressureHoldingLock(size_t bytes)
+{
+    m_scavengerBytes += bytes;
+    if (m_scavengerBytes < scavengerBytesPerMemoryPressureCheck)
+        return;
+
+    m_scavengerBytes = 0;
+
+    if (willRun())
+        return;
+
+    if (!isUnderMemoryPressure())
+        return;
+
+    m_isProbablyGrowing = false;
+    runHoldingLock();
+}
+
+void Scavenger::schedule(size_t bytes)
+{
+    std::lock_guard<Mutex> lock(m_mutex);
+    scheduleIfUnderMemoryPressureHoldingLock(bytes);
+    
+    if (willRunSoon())
+        return;
+    
+    m_isProbablyGrowing = false;
+    runSoonHoldingLock();
 }
 
 void Scavenger::scavenge()
@@ -50,5 +121,57 @@ void Scavenger::scavenge()
         PerProcess<PerHeapKind<Heap>>::get()->at(i).scavenge(lock);
 }
 
+void Scavenger::threadEntryPoint(Scavenger* scavenger)
+{
+    scavenger->threadRunLoop();
+}
+
+void Scavenger::threadRunLoop()
+{
+    setSelfQOSClass();
+    
+    // This loop ratchets downward from most active to least active state. While
+    // we ratchet downward, any other thread may reset our state.
+    
+    // We require any state change while we are sleeping to signal to our
+    // condition variable and wake us up.
+    
+    auto truth = [] { return true; };
+    
+    while (truth()) {
+        if (m_state == State::Sleep) {
+            std::unique_lock<Mutex> lock(m_mutex);
+            m_condition.wait(lock, [&]() { return m_state != State::Sleep; });
+        }
+        
+        if (m_state == State::RunSoon) {
+            std::unique_lock<Mutex> lock(m_mutex);
+            m_condition.wait_for(lock, asyncTaskSleepDuration, [&]() { return m_state != State::RunSoon; });
+        }
+        
+        m_state = State::Sleep;
+        
+        setSelfQOSClass();
+        
+        {
+            std::unique_lock<Mutex> lock(m_mutex);
+            if (m_isProbablyGrowing && !isUnderMemoryPressure()) {
+                m_isProbablyGrowing = false;
+                runSoonHoldingLock();
+                continue;
+            }
+        }
+        
+        scavenge();
+    }
+}
+
+void Scavenger::setSelfQOSClass()
+{
+#if BOS(DARWIN)
+    pthread_set_qos_class_self_np(requestedScavengerThreadQOSClass(), 0);
+#endif
+}
+
 } // namespace bmalloc
 
index 49731dc..a832520 100644 (file)
@@ -26,7 +26,8 @@
 #pragma once
 
 #include "BPlatform.h"
-#include "StaticMutex.h"
+#include "Mutex.h"
+#include <condition_variable>
 #include <mutex>
 
 #if BOS(DARWIN)
@@ -35,9 +36,6 @@
 
 namespace bmalloc {
 
-// FIXME: This class should become a common scavenger mechanism for all heaps.
-// https://bugs.webkit.org/show_bug.cgi?id=174973
-
 class Scavenger {
 public:
     Scavenger(std::lock_guard<StaticMutex>&);
@@ -50,8 +48,39 @@ public:
     void setScavengerThreadQOSClass(qos_class_t overrideClass) { m_requestedScavengerThreadQOSClass = overrideClass; }
     qos_class_t requestedScavengerThreadQOSClass() const { return m_requestedScavengerThreadQOSClass; }
 #endif
+    
+    bool willRun() { return m_state == State::Run; }
+    void run();
+    
+    bool willRunSoon() { return m_state > State::Sleep; }
+    void runSoon();
+    
+    void didStartGrowing();
+    void scheduleIfUnderMemoryPressure(size_t bytes);
+    void schedule(size_t bytes);
 
 private:
+    enum class State { Sleep, Run, RunSoon };
+    
+    void runHoldingLock();
+    void runSoonHoldingLock();
+
+    void scheduleIfUnderMemoryPressureHoldingLock(size_t bytes);
+
+    static void threadEntryPoint(Scavenger*);
+    void threadRunLoop();
+    
+    void setSelfQOSClass();
+    
+    std::atomic<State> m_state { State::Sleep };
+    size_t m_scavengerBytes { 0 };
+    bool m_isProbablyGrowing { false };
+    
+    Mutex m_mutex;
+    std::condition_variable_any m_condition;
+
+    std::thread m_thread;
+    
 #if BOS(DARWIN)
     dispatch_source_t m_pressureHandlerDispatchSource;
     qos_class_t m_requestedScavengerThreadQOSClass { QOS_CLASS_USER_INITIATED };