bmalloc: Miscellaneous cleanup
author ggaren@apple.com <ggaren@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 3 Mar 2015 21:47:49 +0000 (21:47 +0000)
committer ggaren@apple.com <ggaren@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 3 Mar 2015 21:47:49 +0000 (21:47 +0000)
https://bugs.webkit.org/show_bug.cgi?id=142231

Reviewed by Andreas Kling.

No performance change -- maybe a tiny reduction in memory use.

* bmalloc/Heap.cpp: Moved the sleep function into StaticMutex, since
it's a helper for working with mutexes.

(bmalloc::Heap::scavenge): Make sure to wait before we start any
scavenging, since individual scavenging functions now always scavenge
at least one page before waiting themselves.

(bmalloc::Heap::scavengeSmallPages):
(bmalloc::Heap::scavengeMediumPages):
(bmalloc::Heap::scavengeLargeObjects): Use the new wait helper to
simplify this code. Also, we now require our caller to wait until at
least one deallocation is desirable. This simplifies our loop.

(bmalloc::Heap::allocateSmallPage):
(bmalloc::Heap::allocateMediumPage):
(bmalloc::Heap::allocateXLarge):
(bmalloc::Heap::allocateLarge): Don't freak out any time the heap does
an allocation. Only consider the heap to be growing if it actually needs
to allocate new VM. This allows us to shrink the heap back down from a
high water mark more reliably even if heap activity continues.

(bmalloc::sleep): Deleted.
(bmalloc::Heap::scavengeLargeRanges): Renamed to match our use of
"LargeObject".

* bmalloc/Heap.h:

* bmalloc/LargeObject.h:
(bmalloc::LargeObject::operator bool): Added to simplify a while loop.

* bmalloc/StaticMutex.h:
(bmalloc::sleep):
(bmalloc::waitUntilFalse): New helper for waiting until a condition
becomes reliably false.

* bmalloc/Vector.h:
(bmalloc::Vector<T>::~Vector): Oops! Don't deallocate the null pointer.
We don't actually run any Vector destructors, but an iteration of this
patch did, and then crashed. So, let's fix that.

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@180953 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Source/bmalloc/ChangeLog
Source/bmalloc/bmalloc/Heap.cpp
Source/bmalloc/bmalloc/Heap.h
Source/bmalloc/bmalloc/LargeObject.h
Source/bmalloc/bmalloc/StaticMutex.h
Source/bmalloc/bmalloc/Vector.h

index 592d6829345b403d30da249a3b64710a5b06b0d7..db35f277d9a59aabe3ffc279e3912eb63ee4cd80 100644 (file)
@@ -1,3 +1,52 @@
+2015-03-03  Geoffrey Garen  <ggaren@apple.com>
+
+        bmalloc: Miscellaneous cleanup
+        https://bugs.webkit.org/show_bug.cgi?id=142231
+
+        Reviewed by Andreas Kling.
+
+        No performance change -- maybe a tiny reduction in memory use.
+
+        * bmalloc/Heap.cpp: Moved the sleep function into StaticMutex, since
+        it's a helper for working with mutexes.
+
+        (bmalloc::Heap::scavenge): Make sure to wait before we start any
+        scavenging, since individual scavenging functions now always scavenge
+        at least one page before waiting themselves.
+
+        (bmalloc::Heap::scavengeSmallPages):
+        (bmalloc::Heap::scavengeMediumPages):
+        (bmalloc::Heap::scavengeLargeObjects): Use the new wait helper to
+        simplify this code. Also, we now require our caller to wait until at
+        least one deallocation is desirable. This simplifies our loop.
+
+        (bmalloc::Heap::allocateSmallPage):
+        (bmalloc::Heap::allocateMediumPage):
+        (bmalloc::Heap::allocateXLarge):
+        (bmalloc::Heap::allocateLarge): Don't freak out any time the heap does
+        an allocation. Only consider the heap to be growing if it actually needs
+        to allocate new VM. This allows us to shrink the heap back down from a
+        high water mark more reliably even if heap activity continues.
+
+        (bmalloc::sleep): Deleted.
+        (bmalloc::Heap::scavengeLargeRanges): Renamed to match our use of
+        "LargeObject".
+
+        * bmalloc/Heap.h:
+
+        * bmalloc/LargeObject.h:
+        (bmalloc::LargeObject::operator bool): Added to simplify a while loop.
+
+        * bmalloc/StaticMutex.h:
+        (bmalloc::sleep):
+        (bmalloc::waitUntilFalse): New helper for waiting until a condition
+        becomes reliably false.
+
+        * bmalloc/Vector.h:
+        (bmalloc::Vector<T>::~Vector): Oops! Don't deallocate the null pointer.
+        We don't actually run any Vector destructors, but an iteration of this
+        patch did, and then crashed. So, let's fix that.
+
 2015-03-02  Geoffrey Garen  <ggaren@apple.com>
 
         bmalloc: Eagerly remove allocated objects from the free list
index 4aa5946ac72f28d7278a425df58b28da30b9942a..6502a749f685b3fa8f375ff8ef76f1d3c01c5d63 100644 (file)
 
 namespace bmalloc {
 
-static inline void sleep(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds duration)
-{
-    if (duration == std::chrono::milliseconds(0))
-        return;
-    
-    lock.unlock();
-    std::this_thread::sleep_for(duration);
-    lock.lock();
-}
-
 Heap::Heap(std::lock_guard<StaticMutex>&)
     : m_largeObjects(Owner::Heap)
     , m_isAllocatingPages(false)
@@ -93,62 +83,39 @@ void Heap::concurrentScavenge()
     std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
     scavenge(lock, scavengeSleepDuration);
 }
-    
+
 void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
 {
+    waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
+
     scavengeSmallPages(lock, sleepDuration);
     scavengeMediumPages(lock, sleepDuration);
-    scavengeLargeRanges(lock, sleepDuration);
+    scavengeLargeObjects(lock, sleepDuration);
 
     sleep(lock, sleepDuration);
 }
 
 void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
 {
-    while (1) {
-        if (m_isAllocatingPages) {
-            m_isAllocatingPages = false;
-
-            sleep(lock, sleepDuration);
-            continue;
-        }
-
-        if (!m_smallPages.size())
-            return;
+    while (m_smallPages.size()) {
         m_vmHeap.deallocateSmallPage(lock, m_smallPages.pop());
+        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
     }
 }
 
 void Heap::scavengeMediumPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
 {
-    while (1) {
-        if (m_isAllocatingPages) {
-            m_isAllocatingPages = false;
-
-            sleep(lock, sleepDuration);
-            continue;
-        }
-
-        if (!m_mediumPages.size())
-            return;
+    while (m_mediumPages.size()) {
         m_vmHeap.deallocateMediumPage(lock, m_mediumPages.pop());
+        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
     }
 }
 
-void Heap::scavengeLargeRanges(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
+void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
 {
-    while (1) {
-        if (m_isAllocatingPages) {
-            m_isAllocatingPages = false;
-
-            sleep(lock, sleepDuration);
-            continue;
-        }
-
-        LargeObject largeObject = m_largeObjects.takeGreedy();
-        if (!largeObject)
-            return;
+    while (LargeObject largeObject = m_largeObjects.takeGreedy()) {
         m_vmHeap.deallocateLargeObject(lock, largeObject);
+        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
     }
 }
 
@@ -235,12 +202,12 @@ SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t si
             continue;
         return page;
     }
-    
-    m_isAllocatingPages = true;
 
     SmallPage* page = [this, sizeClass]() {
         if (m_smallPages.size())
             return m_smallPages.pop();
+
+        m_isAllocatingPages = true;
         return m_vmHeap.allocateSmallPage();
     }();
 
@@ -257,12 +224,12 @@ MediumPage* Heap::allocateMediumPage(std::lock_guard<StaticMutex>& lock, size_t
             continue;
         return page;
     }
-    
-    m_isAllocatingPages = true;
 
     MediumPage* page = [this, sizeClass]() {
         if (m_mediumPages.size())
             return m_mediumPages.pop();
+
+        m_isAllocatingPages = true;
         return m_vmHeap.allocateMediumPage();
     }();
 
@@ -320,8 +287,6 @@ void* Heap::allocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size
     BASSERT(alignment >= xLargeAlignment);
     BASSERT(size == roundUpToMultipleOf<xLargeAlignment>(size));
 
-    m_isAllocatingPages = true;
-
     void* result = vmAllocate(alignment, size);
     m_xLargeObjects.push(Range(result, size));
     return result;
@@ -379,11 +344,11 @@ void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t size)
     BASSERT(size >= largeMin);
     BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
     
-    m_isAllocatingPages = true;
-
     LargeObject largeObject = m_largeObjects.take(size);
-    if (!largeObject)
+    if (!largeObject) {
+        m_isAllocatingPages = true;
         largeObject = m_vmHeap.allocateLargeObject(size);
+    }
 
     return allocateLarge(lock, largeObject, size);
 }
@@ -400,11 +365,11 @@ void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment,
     BASSERT(alignment >= largeAlignment);
     BASSERT(isPowerOfTwo(alignment));
 
-    m_isAllocatingPages = true;
-
     LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
-    if (!largeObject)
+    if (!largeObject) {
+        m_isAllocatingPages = true;
         largeObject = m_vmHeap.allocateLargeObject(alignment, size, unalignedSize);
+    }
 
     size_t alignmentMask = alignment - 1;
     if (test(largeObject.begin(), alignmentMask)) {
index 6a82f265f30105efdf3f54856e7373495ba2de35..b3a80c04f95a1450d6973283189cab92e14e6e48 100644 (file)
@@ -92,7 +92,7 @@ private:
     void concurrentScavenge();
     void scavengeSmallPages(std::unique_lock<StaticMutex>&, std::chrono::milliseconds);
     void scavengeMediumPages(std::unique_lock<StaticMutex>&, std::chrono::milliseconds);
-    void scavengeLargeRanges(std::unique_lock<StaticMutex>&, std::chrono::milliseconds);
+    void scavengeLargeObjects(std::unique_lock<StaticMutex>&, std::chrono::milliseconds);
 
     std::array<std::array<LineMetadata, SmallPage::lineCount>, smallMax / alignment> m_smallLineMetadata;
     std::array<std::array<LineMetadata, MediumPage::lineCount>, mediumMax / alignment> m_mediumLineMetadata;
index dba8b93c3e8cf04cd5c8c02504a67d5ed7ad1d16..e94f21f08ff2350c4b476501c128a3d82f998539 100644 (file)
@@ -43,6 +43,7 @@ public:
     enum DoNotValidateTag { DoNotValidate };
     LargeObject(DoNotValidateTag, void*);
     
+    operator bool() { return !!*this; }
     bool operator!() { return !m_object; }
 
     char* begin() const { return static_cast<char*>(m_object); }
index ec47dd18a97a3bbcca01bd44cbcce0fcecf5df32..4404e952dd46b895c0e9bd21f0b3c124b6a31d03 100644 (file)
@@ -28,6 +28,8 @@
 
 #include "BAssert.h"
 #include <atomic>
+#include <mutex>
+#include <thread>
 
 // A fast replacement for std::mutex, for use in static storage.
 
@@ -52,6 +54,27 @@ private:
     std::atomic_flag m_flag;
 };
 
+static inline void sleep(
+    std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds duration)
+{
+    if (duration == std::chrono::milliseconds(0))
+        return;
+    
+    lock.unlock();
+    std::this_thread::sleep_for(duration);
+    lock.lock();
+}
+
+static inline void waitUntilFalse(
+    std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration,
+    bool& condition)
+{
+    while (condition) {
+        condition = false;
+        sleep(lock, sleepDuration);
+    }
+}
+
 inline void StaticMutex::init()
 {
     m_flag.clear();
index 5b4112be63cc97fe14614a80866a6896732d7914..2cda0dda2e7ebf75f0270a23008dd8242274b4b0 100644 (file)
@@ -88,7 +88,8 @@ inline Vector<T>::Vector()
 template<typename T>
 Vector<T>::~Vector()
 {
-    vmDeallocate(m_buffer, vmSize(m_capacity * sizeof(T)));
+    if (m_buffer)
+        vmDeallocate(m_buffer, vmSize(m_capacity * sizeof(T)));
 }
 
 template<typename T>