https://bugs.webkit.org/show_bug.cgi?id=136693
Reviewed by Gavin Barraclough.
4% reduction in heap size on the MallocBench *_memory_warning benchmarks.
No throughput change.
We keep an array of medium allocators, just like our array of small
allocators.
In future, we can simplify the allocation fast path by merging the small
and medium allocator arrays. For now, this is the simplest change that
gets the win.
* bmalloc/Allocator.cpp:
(bmalloc::Allocator::Allocator):
(bmalloc::Allocator::scavenge):
(bmalloc::Allocator::allocateMedium):
* bmalloc/Allocator.h:
* bmalloc/Sizes.h:
(bmalloc::Sizes::mediumSizeClassFor):
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@173538 268f45cc-cd09-0410-ab3c-d52691b4dbfc
2014-09-11 Geoffrey Garen <ggaren@apple.com>
+ bmalloc should segregate medium-sized objects by line like it does for small-sized objects
+ https://bugs.webkit.org/show_bug.cgi?id=136693
+
+ Reviewed by Gavin Barraclough.
+
+ 4% reduction in heap size on the MallocBench *_memory_warning benchmarks.
+
+ No throughput change.
+
+ We keep an array of medium allocators, just like our array of small
+ allocators.
+
+ In future, we can simplify the allocation fast path by merging the small
+ and medium allocator arrays. For now, this is the simplest change that
+ gets the win.
+
+ * bmalloc/Allocator.cpp:
+ (bmalloc::Allocator::Allocator):
+ (bmalloc::Allocator::scavenge):
+ (bmalloc::Allocator::allocateMedium):
+ * bmalloc/Allocator.h:
+ * bmalloc/Sizes.h:
+ (bmalloc::Sizes::mediumSizeClassFor):
+
+2014-09-11 Geoffrey Garen <ggaren@apple.com>
+
Reviewed by Sam Weinig.
Renamed log => retire for clarity.
Allocator::Allocator(Deallocator& deallocator)
: m_deallocator(deallocator)
, m_smallAllocators()
- , m_mediumAllocator()
+ , m_mediumAllocators()
, m_smallAllocatorLog()
, m_mediumAllocatorLog()
{
{
scavenge();
}
-
+
void Allocator::scavenge()
{
for (auto& allocator : m_smallAllocators) {
}
processSmallAllocatorLog();
- retire(m_mediumAllocator);
- m_mediumAllocator.clear();
+ for (auto& allocator : m_mediumAllocators) {
+ retire(allocator);
+ allocator.clear();
+ }
processMediumAllocatorLog();
}
void* Allocator::allocateMedium(size_t size)
{
- MediumAllocator& allocator = m_mediumAllocator;
+ MediumAllocator& allocator = m_mediumAllocators[mediumSizeClassFor(size)];
size = roundUpToMultipleOf<alignment>(size);
void* object;
Deallocator& m_deallocator;
std::array<SmallAllocator, smallMax / alignment> m_smallAllocators;
- MediumAllocator m_mediumAllocator;
+ std::array<MediumAllocator, mediumMax / alignment> m_mediumAllocators;
FixedVector<std::pair<SmallLine*, unsigned char>, smallAllocatorLogCapacity> m_smallAllocatorLog;
FixedVector<std::pair<MediumLine*, unsigned char>, mediumAllocatorLogCapacity> m_mediumAllocatorLog;
static const size_t largeAlignmentShift = 6;
static_assert(1 << largeAlignmentShift == largeAlignment, "largeAlignmentShift be log2(largeAlignment).");
static const size_t largeMax = largeChunkSize * 99 / 100; // Plenty of room for metadata.
- static const size_t largeMin = 1024;
+ static const size_t largeMin = mediumMax;
static const size_t segregatedFreeListSearchDepth = 16;
static const size_t smallSizeClassMask = (smallMax / alignment) - 1;
return mask((size - 1ul) / alignment, smallSizeClassMask);
}
+
+ inline size_t mediumSizeClassFor(size_t size)
+ {
+ static const size_t mediumSizeClassMask = (mediumMax / alignment) - 1;
+ return mask((size - 1ul) / alignment, mediumSizeClassMask);
+ }
};
using namespace Sizes;