+2017-04-27 Michael Saboff <msaboff@apple.com>
+
+ bmalloc scavenger should know what page classes are allocating
+ https://bugs.webkit.org/show_bug.cgi?id=171384
+
+ Reviewed by Geoffrey Garen.
+
+ This change replaces the single m_isAllocatingPages flag with a per-page-class
+ array of flags, plus a separate m_isAllocatingLargePages flag, to track which
+ page classes are currently allocating. When scavenging, we skip page classes
+ that are actively allocating and come back to them on a subsequent pass. This
+ reduces both the time it takes the scavenger to free up pages and the total
+ time it takes to handle all page classes. Scavenging also now takes an explicit
+ ScavengeMode: Async passes sleep first and may drop the heap lock around VM
+ calls, while Sync passes run to completion with the lock held.
+
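+ As an illustration only (not part of the patch), here is a minimal standalone
+ sketch of the allocator/scavenger handshake this change introduces; the
+ pageClassCount value and the function names are hypothetical stand-ins:
+
+     #include <array>
+     #include <cstddef>
+
+     constexpr std::size_t pageClassCount = 16;        // stand-in value
+     std::array<bool, pageClassCount> isAllocating {}; // per-class "hot" flags
+
+     // Allocation path: mark the class hot so the scavenger skips it.
+     void noteAllocation(std::size_t pageClass)
+     {
+         isAllocating[pageClass] = true;
+     }
+
+     // Scavenger path: returns true if a class had to be skipped and a
+     // follow-up pass should be scheduled.
+     bool scavengePass()
+     {
+         bool needsAnotherPass = false;
+         isAllocating.fill(false);                     // clear before the pass
+         for (std::size_t pageClass = 0; pageClass < pageClassCount; pageClass++) {
+             if (isAllocating[pageClass]) {            // set again during this pass
+                 needsAnotherPass = true;              //  => concurrent allocation;
+                 continue;                             //  skip just this class
+             }
+             // ... return this class's free pages to the OS ...
+         }
+         return needsAnotherPass;
+     }
+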
+ * bmalloc/Heap.cpp:
+ (bmalloc::Heap::Heap):
+ (bmalloc::Heap::concurrentScavenge):
+ (bmalloc::Heap::scavenge):
+ (bmalloc::Heap::scavengeSmallPages):
+ (bmalloc::Heap::scavengeLargeObjects):
+ (bmalloc::Heap::allocateSmallPage):
+ (bmalloc::Heap::splitAndAllocate):
+ (bmalloc::Heap::deallocateLarge):
+ * bmalloc/Heap.h:
+ (bmalloc::Heap::takeRequestedScavengerThreadQOSClass): Deleted.
+ * bmalloc/VMHeap.h:
+ (bmalloc::VMHeap::deallocateSmallPage):
+ * bmalloc/bmalloc.h:
+ (bmalloc::api::scavenge):
+
2017-04-25 Michael Saboff <msaboff@apple.com>

 Call bmalloc scavenger first when handling a memory pressure event
Heap::Heap(std::lock_guard<StaticMutex>&)
: m_vmPageSizePhysical(vmPageSizePhysical())
- , m_isAllocatingPages(false)
, m_scavenger(*this, &Heap::concurrentScavenge)
, m_debugHeap(nullptr)
{
void Heap::concurrentScavenge()
{
- std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
-
#if BOS(DARWIN)
- if (auto requestedQOSClass = PerProcess<Heap>::getFastCase()->takeRequestedScavengerThreadQOSClass())
- pthread_set_qos_class_self_np(requestedQOSClass, 0);
+ pthread_set_qos_class_self_np(m_requestedScavengerThreadQOSClass, 0);
#endif
- scavenge(lock, scavengeSleepDuration);
+ std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
+
+ scavenge(lock, Async);
}
-void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
+void Heap::scavenge(std::unique_lock<StaticMutex>& lock, ScavengeMode scavengeMode)
{
- waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
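+ // Clear all allocation flags up front; a flag found set later in this
+ // pass means that class allocated concurrently.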
+ m_isAllocatingPages.fill(false);
+ m_isAllocatingLargePages = false;
- scavengeSmallPages(lock, sleepDuration);
- scavengeLargeObjects(lock, sleepDuration);
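+ // An Async pass delays itself before scavenging; a Sync pass runs immediately.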
+ if (scavengeMode == Async)
+ sleep(lock, scavengeSleepDuration);
- sleep(lock, sleepDuration);
+ scavengeSmallPages(lock, scavengeMode);
+ scavengeLargeObjects(lock, scavengeMode);
}
-void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
+void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, ScavengeMode scavengeMode)
{
- for (auto& smallPages : m_smallPages) {
+ for (size_t pageClass = 0; pageClass < pageClassCount; pageClass++) {
+ auto& smallPages = m_smallPages[pageClass];
+
while (!smallPages.isEmpty()) {
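+ // This class has allocated since the flags were cleared; skip it and
+ // reschedule the scavenger to revisit it.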
+ if (m_isAllocatingPages[pageClass]) {
+ m_scavenger.run();
+ break;
+ }
+
SmallPage* page = smallPages.pop();
- size_t pageClass = m_pageClasses[page->sizeClass()];
- m_vmHeap.deallocateSmallPage(lock, pageClass, page);
- waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
+ m_vmHeap.deallocateSmallPage(lock, pageClass, page, scavengeMode);
}
}
}
-void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
+void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock, ScavengeMode scavengeMode)
{
auto& ranges = m_largeFree.ranges();
for (size_t i = ranges.size(); i-- > 0; i = std::min(i, ranges.size())) {
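+ // Large allocations are in flight; defer large-object scavenging to a
+ // rescheduled pass.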
+ if (m_isAllocatingLargePages) {
+ m_scavenger.run();
+ break;
+ }
+
auto range = ranges.pop(i);
- lock.unlock();
+ if (scavengeMode == Async)
+ lock.unlock();
vmDeallocatePhysicalPagesSloppy(range.begin(), range.size());
- lock.lock();
+ if (scavengeMode == Async)
+ lock.lock();
range.setPhysicalSize(0);
ranges.push(range);
-
- waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
}
}
if (!m_smallPages[pageClass].isEmpty())
return m_smallPages[pageClass].pop();
- m_isAllocatingPages = true;
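+ // Flag this page class as actively allocating so the scavenger skips it.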
+ m_isAllocatingPages[pageClass] = true;
SmallPage* page = m_vmHeap.allocateSmallPage(lock, pageClass);
m_objectTypes.set(Chunk::get(page), ObjectType::Small);
}
if (range.physicalSize() < range.size()) {
- m_isAllocatingPages = true;
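+ // Flag large allocation activity so the scavenger defers large scavenging.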
+ m_isAllocatingLargePages = true;
vmAllocatePhysicalPagesSloppy(range.begin() + range.physicalSize(), range.size() - range.physicalSize());
range.setPhysicalSize(range.size());
size_t largeSize(std::lock_guard<StaticMutex>&, void*);
void shrinkLarge(std::lock_guard<StaticMutex>&, const Range&, size_t);
- void scavenge(std::unique_lock<StaticMutex>&, std::chrono::milliseconds sleepDuration);
+ void scavenge(std::unique_lock<StaticMutex>&, ScavengeMode);
#if BOS(DARWIN)
- qos_class_t takeRequestedScavengerThreadQOSClass() { return std::exchange(m_requestedScavengerThreadQOSClass, QOS_CLASS_UNSPECIFIED); }
void setScavengerThreadQOSClass(qos_class_t overrideClass) { m_requestedScavengerThreadQOSClass = overrideClass; }
#endif
LargeRange splitAndAllocate(LargeRange&, size_t alignment, size_t);
void concurrentScavenge();
- void scavengeSmallPages(std::unique_lock<StaticMutex>&, std::chrono::milliseconds);
- void scavengeLargeObjects(std::unique_lock<StaticMutex>&, std::chrono::milliseconds);
+ void scavengeSmallPages(std::unique_lock<StaticMutex>&, ScavengeMode);
+ void scavengeLargeObjects(std::unique_lock<StaticMutex>&, ScavengeMode);
size_t m_vmPageSizePhysical;
Vector<LineMetadata> m_smallLineMetadata;
Map<Chunk*, ObjectType, ChunkHash> m_objectTypes;
- bool m_isAllocatingPages;
+ std::array<bool, pageClassCount> m_isAllocatingPages;
+ bool m_isAllocatingLargePages;
+
AsyncTask<Heap, decltype(&Heap::concurrentScavenge)> m_scavenger;
Environment m_environment;
class EndTag;
class Heap;
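+// Sync scavenging keeps the heap lock held throughout; Async scavenging
+// sleeps before the pass and drops the lock around VM deallocation.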
+typedef enum { Sync, Async } ScavengeMode;
+
class VMHeap {
public:
SmallPage* allocateSmallPage(std::lock_guard<StaticMutex>&, size_t);
- void deallocateSmallPage(std::unique_lock<StaticMutex>&, size_t, SmallPage*);
+ void deallocateSmallPage(std::unique_lock<StaticMutex>&, size_t, SmallPage*, ScavengeMode);
LargeRange tryAllocateLargeChunk(std::lock_guard<StaticMutex>&, size_t alignment, size_t);
return page;
}
-inline void VMHeap::deallocateSmallPage(std::unique_lock<StaticMutex>& lock, size_t pageClass, SmallPage* page)
+inline void VMHeap::deallocateSmallPage(std::unique_lock<StaticMutex>& lock, size_t pageClass, SmallPage* page, ScavengeMode scavengeMode)
{
- lock.unlock();
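+ // Only an Async scavenge may drop the lock around the VM call; a Sync
+ // scavenge keeps it held.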
+ if (scavengeMode == Async)
+ lock.unlock();
vmDeallocatePhysicalPagesSloppy(page->begin()->begin(), pageSize(pageClass));
- lock.lock();
+ if (scavengeMode == Async)
+ lock.lock();
m_smallPages[pageClass].push(page);
}