+2015-03-03 Geoffrey Garen <ggaren@apple.com>
+
+ bmalloc: Miscellaneous cleanup
+ https://bugs.webkit.org/show_bug.cgi?id=142231
+
+ Reviewed by Andreas Kling.
+
+ No performance change -- maybe a tiny reduction in memory use.
+
+ * bmalloc/Heap.cpp: Moved the sleep function into StaticMutex, since
+ it's a helper for working with mutexes.
+
+ (bmalloc::Heap::scavenge): Make sure to wait before we start any
+ scavenging, since individual scavenging functions now always scavenge
+ at least one page before waiting themselves.
+
+ (bmalloc::Heap::scavengeSmallPages):
+ (bmalloc::Heap::scavengeMediumPages):
+ (bmalloc::Heap::scavengeLargeObjects): Use the new wait helper to
+ simplify this code. Also, we now require our caller to wait until at
+ least one deallocation is desirable before invoking us. This
+ simplifies our loop.
+
+ (bmalloc::Heap::allocateSmallPage):
+ (bmalloc::Heap::allocateMediumPage):
+ (bmalloc::Heap::allocateXLarge):
+ (bmalloc::Heap::allocateLarge): Don't freak out any time the heap does
+ an allocation. Only consider the heap to be growing if it actually needs
+ to allocate new VM. This allows us to shrink the heap back down from a
+ high water mark more reliably even if heap activity continues.
+
+ (bmalloc::sleep): Deleted.
+ (bmalloc::Heap::scavengeLargeRanges): Renamed to match our use of
+ "LargeObject".
+
+ * bmalloc/Heap.h:
+
+ * bmalloc/LargeObject.h:
+ (bmalloc::LargeObject::operator bool): Added to simplify a while loop.
+
+ * bmalloc/StaticMutex.h:
+ (bmalloc::sleep):
+ (bmalloc::waitUntilFalse): New helper for waiting until a condition
+ becomes reliably false.
+
+ * bmalloc/Vector.h:
+ (bmalloc::Vector<T>::~Vector): Oops! Don't deallocate the null pointer.
+ We don't actually run any Vector destructors, but an iteration of this
+ patch did, and then crashed. So, let's fix that.
+
2015-03-02 Geoffrey Garen <ggaren@apple.com>
bmalloc: Eagerly remove allocated objects from the free list
namespace bmalloc {
-static inline void sleep(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds duration)
-{
- if (duration == std::chrono::milliseconds(0))
- return;
-
- lock.unlock();
- std::this_thread::sleep_for(duration);
- lock.lock();
-}
-
Heap::Heap(std::lock_guard<StaticMutex>&)
: m_largeObjects(Owner::Heap)
, m_isAllocatingPages(false)
std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
scavenge(lock, scavengeSleepDuration);
}
-
+
void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
+ waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
+
scavengeSmallPages(lock, sleepDuration);
scavengeMediumPages(lock, sleepDuration);
- scavengeLargeRanges(lock, sleepDuration);
+ scavengeLargeObjects(lock, sleepDuration);
sleep(lock, sleepDuration);
}
void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
- while (1) {
- if (m_isAllocatingPages) {
- m_isAllocatingPages = false;
-
- sleep(lock, sleepDuration);
- continue;
- }
-
- if (!m_smallPages.size())
- return;
+ while (m_smallPages.size()) {
m_vmHeap.deallocateSmallPage(lock, m_smallPages.pop());
+ waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
}
}
void Heap::scavengeMediumPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
- while (1) {
- if (m_isAllocatingPages) {
- m_isAllocatingPages = false;
-
- sleep(lock, sleepDuration);
- continue;
- }
-
- if (!m_mediumPages.size())
- return;
+ while (m_mediumPages.size()) {
m_vmHeap.deallocateMediumPage(lock, m_mediumPages.pop());
+ waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
}
}
-void Heap::scavengeLargeRanges(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
+void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
- while (1) {
- if (m_isAllocatingPages) {
- m_isAllocatingPages = false;
-
- sleep(lock, sleepDuration);
- continue;
- }
-
- LargeObject largeObject = m_largeObjects.takeGreedy();
- if (!largeObject)
- return;
+ while (LargeObject largeObject = m_largeObjects.takeGreedy()) {
m_vmHeap.deallocateLargeObject(lock, largeObject);
+ waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
}
}
continue;
return page;
}
-
- m_isAllocatingPages = true;
SmallPage* page = [this, sizeClass]() {
if (m_smallPages.size())
return m_smallPages.pop();
+
+ m_isAllocatingPages = true;
return m_vmHeap.allocateSmallPage();
}();
continue;
return page;
}
-
- m_isAllocatingPages = true;
MediumPage* page = [this, sizeClass]() {
if (m_mediumPages.size())
return m_mediumPages.pop();
+
+ m_isAllocatingPages = true;
return m_vmHeap.allocateMediumPage();
}();
BASSERT(alignment >= xLargeAlignment);
BASSERT(size == roundUpToMultipleOf<xLargeAlignment>(size));
- m_isAllocatingPages = true;
-
void* result = vmAllocate(alignment, size);
m_xLargeObjects.push(Range(result, size));
return result;
BASSERT(size >= largeMin);
BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
- m_isAllocatingPages = true;
-
LargeObject largeObject = m_largeObjects.take(size);
- if (!largeObject)
+ if (!largeObject) {
+ m_isAllocatingPages = true;
largeObject = m_vmHeap.allocateLargeObject(size);
+ }
return allocateLarge(lock, largeObject, size);
}
BASSERT(alignment >= largeAlignment);
BASSERT(isPowerOfTwo(alignment));
- m_isAllocatingPages = true;
-
LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
- if (!largeObject)
+ if (!largeObject) {
+ m_isAllocatingPages = true;
largeObject = m_vmHeap.allocateLargeObject(alignment, size, unalignedSize);
+ }
size_t alignmentMask = alignment - 1;
if (test(largeObject.begin(), alignmentMask)) {