https://bugs.webkit.org/show_bug.cgi?id=131879
Reviewed by Andreas Kling.
Mutex now has a proper constructor, so you can't deadlock by forgetting
to initialize it.
* bmalloc.xcodeproj/project.pbxproj:
* bmalloc/Allocator.cpp:
(bmalloc::Allocator::processXSmallAllocatorLog):
(bmalloc::Allocator::processSmallAllocatorLog):
(bmalloc::Allocator::processMediumAllocatorLog):
(bmalloc::Allocator::allocateLarge):
(bmalloc::Allocator::allocateXLarge): Global replace Mutex => StaticMutex,
since the Heap mutex is a static.
* bmalloc/AsyncTask.h:
(bmalloc::Function>::AsyncTask): Use Mutex, since we're not static. No
need for explicit initialization anymore.
* bmalloc/Deallocator.cpp:
(bmalloc::Deallocator::scavenge):
(bmalloc::Deallocator::deallocateLarge):
(bmalloc::Deallocator::deallocateXLarge):
(bmalloc::Deallocator::processObjectLog):
(bmalloc::Deallocator::deallocateSmallLine):
(bmalloc::Deallocator::deallocateXSmallLine):
(bmalloc::Deallocator::allocateSmallLine):
(bmalloc::Deallocator::allocateXSmallLine):
(bmalloc::Deallocator::deallocateMediumLine):
(bmalloc::Deallocator::allocateMediumLine):
* bmalloc/Deallocator.h:
* bmalloc/Heap.cpp:
(bmalloc::sleep):
(bmalloc::Heap::Heap):
(bmalloc::Heap::concurrentScavenge):
(bmalloc::Heap::scavenge):
(bmalloc::Heap::scavengeSmallPages):
(bmalloc::Heap::scavengeXSmallPages):
(bmalloc::Heap::scavengeMediumPages):
(bmalloc::Heap::scavengeLargeRanges):
(bmalloc::Heap::allocateXSmallLineSlowCase):
(bmalloc::Heap::allocateSmallLineSlowCase):
(bmalloc::Heap::allocateMediumLineSlowCase):
(bmalloc::Heap::allocateXLarge):
(bmalloc::Heap::deallocateXLarge):
(bmalloc::Heap::allocateLarge):
(bmalloc::Heap::deallocateLarge):
* bmalloc/Heap.h:
(bmalloc::Heap::deallocateXSmallLine):
(bmalloc::Heap::allocateXSmallLine):
(bmalloc::Heap::deallocateSmallLine):
(bmalloc::Heap::allocateSmallLine):
(bmalloc::Heap::deallocateMediumLine):
(bmalloc::Heap::allocateMediumLine):
* bmalloc/Line.h:
(bmalloc::Line<Traits>::deref):
* bmalloc/Mutex.cpp: Removed.
* bmalloc/Mutex.h:
(bmalloc::Mutex::Mutex):
(bmalloc::Mutex::init): Deleted.
(bmalloc::Mutex::try_lock): Deleted.
(bmalloc::Mutex::lock): Deleted.
(bmalloc::Mutex::unlock): Deleted.
* bmalloc/Page.h:
(bmalloc::Page<Traits>::ref):
(bmalloc::Page<Traits>::deref):
(bmalloc::Page<Traits>::refCount):
* bmalloc/PerProcess.h:
(bmalloc::PerProcess::mutex):
(bmalloc::PerProcess<T>::getSlowCase):
* bmalloc/StaticMutex.cpp: Added.
(bmalloc::StaticMutex::lockSlowCase):
* bmalloc/StaticMutex.h: Added.
(bmalloc::StaticMutex::init):
(bmalloc::StaticMutex::try_lock):
(bmalloc::StaticMutex::lock):
(bmalloc::StaticMutex::unlock):
* bmalloc/VMHeap.h:
(bmalloc::VMHeap::deallocateXSmallPage):
(bmalloc::VMHeap::deallocateSmallPage):
(bmalloc::VMHeap::deallocateMediumPage):
(bmalloc::VMHeap::deallocateLargeRange):
* bmalloc/bmalloc.h:
(bmalloc::api::scavenge): Global replace Mutex => StaticMutex,
since the Heap mutex is a static.
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@167540 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+2014-04-19 Geoffrey Garen <ggaren@apple.com>
+
+ bmalloc: Mutex should be harder to use wrong
+ https://bugs.webkit.org/show_bug.cgi?id=131879
+
+ Reviewed by Andreas Kling.
+
+ Mutex now has a proper constructor, so you can't deadlock by forgetting
+ to initialize it.
+
+ * bmalloc.xcodeproj/project.pbxproj:
+ * bmalloc/Allocator.cpp:
+ (bmalloc::Allocator::processXSmallAllocatorLog):
+ (bmalloc::Allocator::processSmallAllocatorLog):
+ (bmalloc::Allocator::processMediumAllocatorLog):
+ (bmalloc::Allocator::allocateLarge):
+ (bmalloc::Allocator::allocateXLarge): Global replace Mutex => StaticMutex,
+ since the Heap mutex is a static.
+
+ * bmalloc/AsyncTask.h:
+ (bmalloc::Function>::AsyncTask): Use Mutex, since we're not static. No
+ need for explicit initialization anymore.
+
+ * bmalloc/Deallocator.cpp:
+ (bmalloc::Deallocator::scavenge):
+ (bmalloc::Deallocator::deallocateLarge):
+ (bmalloc::Deallocator::deallocateXLarge):
+ (bmalloc::Deallocator::processObjectLog):
+ (bmalloc::Deallocator::deallocateSmallLine):
+ (bmalloc::Deallocator::deallocateXSmallLine):
+ (bmalloc::Deallocator::allocateSmallLine):
+ (bmalloc::Deallocator::allocateXSmallLine):
+ (bmalloc::Deallocator::deallocateMediumLine):
+ (bmalloc::Deallocator::allocateMediumLine):
+ * bmalloc/Deallocator.h:
+ * bmalloc/Heap.cpp:
+ (bmalloc::sleep):
+ (bmalloc::Heap::Heap):
+ (bmalloc::Heap::concurrentScavenge):
+ (bmalloc::Heap::scavenge):
+ (bmalloc::Heap::scavengeSmallPages):
+ (bmalloc::Heap::scavengeXSmallPages):
+ (bmalloc::Heap::scavengeMediumPages):
+ (bmalloc::Heap::scavengeLargeRanges):
+ (bmalloc::Heap::allocateXSmallLineSlowCase):
+ (bmalloc::Heap::allocateSmallLineSlowCase):
+ (bmalloc::Heap::allocateMediumLineSlowCase):
+ (bmalloc::Heap::allocateXLarge):
+ (bmalloc::Heap::deallocateXLarge):
+ (bmalloc::Heap::allocateLarge):
+ (bmalloc::Heap::deallocateLarge):
+ * bmalloc/Heap.h:
+ (bmalloc::Heap::deallocateXSmallLine):
+ (bmalloc::Heap::allocateXSmallLine):
+ (bmalloc::Heap::deallocateSmallLine):
+ (bmalloc::Heap::allocateSmallLine):
+ (bmalloc::Heap::deallocateMediumLine):
+ (bmalloc::Heap::allocateMediumLine):
+ * bmalloc/Line.h:
+ (bmalloc::Line<Traits>::deref):
+ * bmalloc/Mutex.cpp: Removed.
+ * bmalloc/Mutex.h:
+ (bmalloc::Mutex::Mutex):
+ (bmalloc::Mutex::init): Deleted.
+ (bmalloc::Mutex::try_lock): Deleted.
+ (bmalloc::Mutex::lock): Deleted.
+ (bmalloc::Mutex::unlock): Deleted.
+ * bmalloc/Page.h:
+ (bmalloc::Page<Traits>::ref):
+ (bmalloc::Page<Traits>::deref):
+ (bmalloc::Page<Traits>::refCount):
+ * bmalloc/PerProcess.h:
+ (bmalloc::PerProcess::mutex):
+ (bmalloc::PerProcess<T>::getSlowCase):
+ * bmalloc/StaticMutex.cpp: Added.
+ (bmalloc::StaticMutex::lockSlowCase):
+ * bmalloc/StaticMutex.h: Added.
+ (bmalloc::StaticMutex::init):
+ (bmalloc::StaticMutex::try_lock):
+ (bmalloc::StaticMutex::lock):
+ (bmalloc::StaticMutex::unlock):
+ * bmalloc/VMHeap.h:
+ (bmalloc::VMHeap::deallocateXSmallPage):
+ (bmalloc::VMHeap::deallocateSmallPage):
+ (bmalloc::VMHeap::deallocateMediumPage):
+ (bmalloc::VMHeap::deallocateLargeRange):
+ * bmalloc/bmalloc.h:
+ (bmalloc::api::scavenge): Global replace Mutex => StaticMutex,
+ since the Heap mutex is a static.
+
2014-04-18 Geoffrey Garen <ggaren@apple.com>
bmalloc: AsyncTask should use Mutex instead of std::mutex
142FCC7A190080B8009032D4 /* XSmallPage.h in Headers */ = {isa = PBXBuildFile; fileRef = 142FCC76190080B8009032D4 /* XSmallPage.h */; };
142FCC7B190080B8009032D4 /* XSmallTraits.h in Headers */ = {isa = PBXBuildFile; fileRef = 142FCC77190080B8009032D4 /* XSmallTraits.h */; };
142FCC7D1900815E009032D4 /* XSmallAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 142FCC7C1900815E009032D4 /* XSmallAllocator.h */; };
+ 143CB81C19022BC900B16A45 /* StaticMutex.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 143CB81A19022BC900B16A45 /* StaticMutex.cpp */; };
+ 143CB81D19022BC900B16A45 /* StaticMutex.h in Headers */ = {isa = PBXBuildFile; fileRef = 143CB81B19022BC900B16A45 /* StaticMutex.h */; };
1448C30018F3754600502839 /* mbmalloc.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1448C2FF18F3754300502839 /* mbmalloc.cpp */; };
1448C30118F3754C00502839 /* bmalloc.h in Headers */ = {isa = PBXBuildFile; fileRef = 1448C2FE18F3754300502839 /* bmalloc.h */; settings = {ATTRIBUTES = (Private, ); }; };
14C919C918FCC59F0028DB43 /* BPlatform.h in Headers */ = {isa = PBXBuildFile; fileRef = 14C919C818FCC59F0028DB43 /* BPlatform.h */; };
14F271C718EA3990008C152F /* Heap.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14DA320E18875D9F007269E0 /* Heap.cpp */; };
14F271C818EA3990008C152F /* ObjectType.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14105E8318E14374003A106E /* ObjectType.cpp */; };
14F271C918EA3990008C152F /* VMHeap.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 144F7BFB18BFC517003537F3 /* VMHeap.cpp */; };
- 14F271CA18EA3990008C152F /* Mutex.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 144DCED817A728570093B2F2 /* Mutex.cpp */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
142FCC76190080B8009032D4 /* XSmallPage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = XSmallPage.h; path = bmalloc/XSmallPage.h; sourceTree = "<group>"; };
142FCC77190080B8009032D4 /* XSmallTraits.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = XSmallTraits.h; path = bmalloc/XSmallTraits.h; sourceTree = "<group>"; };
142FCC7C1900815E009032D4 /* XSmallAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = XSmallAllocator.h; path = bmalloc/XSmallAllocator.h; sourceTree = "<group>"; };
+ 143CB81A19022BC900B16A45 /* StaticMutex.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = StaticMutex.cpp; path = bmalloc/StaticMutex.cpp; sourceTree = "<group>"; };
+ 143CB81B19022BC900B16A45 /* StaticMutex.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = StaticMutex.h; path = bmalloc/StaticMutex.h; sourceTree = "<group>"; };
143E29E918CAE8BE00FE8A0F /* MediumPage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = MediumPage.h; path = bmalloc/MediumPage.h; sourceTree = "<group>"; };
143E29ED18CAE90500FE8A0F /* SmallPage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = SmallPage.h; path = bmalloc/SmallPage.h; sourceTree = "<group>"; };
144469E417A46BFE00F9EA1D /* Cache.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; lineEnding = 0; name = Cache.cpp; path = bmalloc/Cache.cpp; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.cpp; };
1448C2FE18F3754300502839 /* bmalloc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = bmalloc.h; path = bmalloc/bmalloc.h; sourceTree = "<group>"; };
1448C2FF18F3754300502839 /* mbmalloc.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = mbmalloc.cpp; path = bmalloc/mbmalloc.cpp; sourceTree = "<group>"; };
144DCED617A649D90093B2F2 /* Mutex.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Mutex.h; path = bmalloc/Mutex.h; sourceTree = "<group>"; };
- 144DCED817A728570093B2F2 /* Mutex.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = Mutex.cpp; path = bmalloc/Mutex.cpp; sourceTree = "<group>"; };
144F7BFB18BFC517003537F3 /* VMHeap.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = VMHeap.cpp; path = bmalloc/VMHeap.cpp; sourceTree = "<group>"; };
144F7BFC18BFC517003537F3 /* VMHeap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = VMHeap.h; path = bmalloc/VMHeap.h; sourceTree = "<group>"; };
1452478518BC757C00F80098 /* MediumLine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = MediumLine.h; path = bmalloc/MediumLine.h; sourceTree = "<group>"; };
14C919C818FCC59F0028DB43 /* BPlatform.h */,
14D9DB4517F2447100EAAB79 /* FixedVector.h */,
1413E460189DCE1E00546D68 /* Inline.h */,
- 144DCED817A728570093B2F2 /* Mutex.cpp */,
144DCED617A649D90093B2F2 /* Mutex.h */,
14446A0717A61FA400F9EA1D /* PerProcess.h */,
144469FD17A61F1F00F9EA1D /* PerThread.h */,
145F6878179E3A4400D65598 /* Range.h */,
+ 143CB81A19022BC900B16A45 /* StaticMutex.cpp */,
+ 143CB81B19022BC900B16A45 /* StaticMutex.h */,
1417F64F18B7280C0076FA3F /* Syscall.h */,
1479E21217A1A255006D4E9D /* Vector.h */,
1479E21417A1A63E006D4E9D /* VMAllocate.h */,
14DD78B418F48D6B00950702 /* Chunk.h in Headers */,
14DD78CA18F48D7500950702 /* Mutex.h in Headers */,
142FCC7D1900815E009032D4 /* XSmallAllocator.h in Headers */,
+ 143CB81D19022BC900B16A45 /* StaticMutex.h in Headers */,
14DD78D118F48EC600950702 /* XLargeChunk.h in Headers */,
14DD78B918F48D6B00950702 /* MediumTraits.h in Headers */,
1448C30118F3754C00502839 /* bmalloc.h in Headers */,
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
- 14F271CA18EA3990008C152F /* Mutex.cpp in Sources */,
+ 143CB81C19022BC900B16A45 /* StaticMutex.cpp in Sources */,
14F271C618EA3983008C152F /* SegregatedFreeList.cpp in Sources */,
14F271C318EA3978008C152F /* Allocator.cpp in Sources */,
14F271C718EA3990008C152F /* Heap.cpp in Sources */,
void Allocator::processXSmallAllocatorLog()
{
- std::lock_guard<Mutex> lock(PerProcess<Heap>::mutex());
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
for (auto& logEntry : m_xSmallAllocatorLog) {
if (!logEntry.first->deref(lock, logEntry.second))
void Allocator::processSmallAllocatorLog()
{
- std::lock_guard<Mutex> lock(PerProcess<Heap>::mutex());
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
for (auto& logEntry : m_smallAllocatorLog) {
if (!logEntry.first->deref(lock, logEntry.second))
void Allocator::processMediumAllocatorLog()
{
- std::lock_guard<Mutex> lock(PerProcess<Heap>::mutex());
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
for (auto& logEntry : m_mediumAllocatorLog) {
if (!logEntry.first->deref(lock, logEntry.second))
void* Allocator::allocateLarge(size_t size)
{
size = roundUpToMultipleOf<largeAlignment>(size);
- std::lock_guard<Mutex> lock(PerProcess<Heap>::mutex());
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
return PerProcess<Heap>::getFastCase()->allocateLarge(lock, size);
}
void* Allocator::allocateXLarge(size_t size)
{
size = roundUpToMultipleOf<largeAlignment>(size);
- std::lock_guard<Mutex> lock(PerProcess<Heap>::mutex());
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
return PerProcess<Heap>::getFastCase()->allocateXLarge(lock, size);
}
, m_object(object)
, m_function(function)
{
- m_conditionMutex.init();
}
template<typename Object, typename Function>
{
processObjectLog();
- std::lock_guard<Mutex> lock(PerProcess<Heap>::mutex());
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
Heap* heap = PerProcess<Heap>::getFastCase();
while (m_xSmallLineCache.size())
void Deallocator::deallocateLarge(void* object)
{
- std::lock_guard<Mutex> lock(PerProcess<Heap>::mutex());
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
PerProcess<Heap>::getFastCase()->deallocateLarge(lock, object);
}
void Deallocator::deallocateXLarge(void* object)
{
- std::lock_guard<Mutex> lock(PerProcess<Heap>::mutex());
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
PerProcess<Heap>::getFastCase()->deallocateXLarge(lock, object);
}
void Deallocator::processObjectLog()
{
- std::lock_guard<Mutex> lock(PerProcess<Heap>::mutex());
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
for (auto object : m_objectLog) {
if (isXSmall(object)) {
return deallocateXLarge(object);
}
-void Deallocator::deallocateSmallLine(std::lock_guard<Mutex>& lock, SmallLine* line)
+void Deallocator::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, SmallLine* line)
{
if (m_smallLineCache.size() == m_smallLineCache.capacity())
return PerProcess<Heap>::getFastCase()->deallocateSmallLine(lock, line);
m_smallLineCache.push(line);
}
-void Deallocator::deallocateXSmallLine(std::lock_guard<Mutex>& lock, XSmallLine* line)
+void Deallocator::deallocateXSmallLine(std::lock_guard<StaticMutex>& lock, XSmallLine* line)
{
if (m_xSmallLineCache.size() == m_xSmallLineCache.capacity())
return PerProcess<Heap>::getFastCase()->deallocateXSmallLine(lock, line);
SmallLine* Deallocator::allocateSmallLine()
{
if (!m_smallLineCache.size()) {
- std::lock_guard<Mutex> lock(PerProcess<Heap>::mutex());
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
Heap* heap = PerProcess<Heap>::getFastCase();
while (m_smallLineCache.size() != m_smallLineCache.capacity())
XSmallLine* Deallocator::allocateXSmallLine()
{
if (!m_xSmallLineCache.size()) {
- std::lock_guard<Mutex> lock(PerProcess<Heap>::mutex());
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
Heap* heap = PerProcess<Heap>::getFastCase();
while (m_xSmallLineCache.size() != m_xSmallLineCache.capacity())
return m_xSmallLineCache.pop();
}
-void Deallocator::deallocateMediumLine(std::lock_guard<Mutex>& lock, MediumLine* line)
+void Deallocator::deallocateMediumLine(std::lock_guard<StaticMutex>& lock, MediumLine* line)
{
if (m_mediumLineCache.size() == m_mediumLineCache.capacity())
return PerProcess<Heap>::getFastCase()->deallocateMediumLine(lock, line);
MediumLine* Deallocator::allocateMediumLine()
{
if (!m_mediumLineCache.size()) {
- std::lock_guard<Mutex> lock(PerProcess<Heap>::mutex());
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
Heap* heap = PerProcess<Heap>::getFastCase();
while (m_mediumLineCache.size() != m_mediumLineCache.capacity())
bool deallocateFastCase(void*);
void deallocateSlowCase(void*);
- void deallocateXSmallLine(std::lock_guard<Mutex>&, XSmallLine*);
+ void deallocateXSmallLine(std::lock_guard<StaticMutex>&, XSmallLine*);
XSmallLine* allocateXSmallLine();
- void deallocateSmallLine(std::lock_guard<Mutex>&, SmallLine*);
+ void deallocateSmallLine(std::lock_guard<StaticMutex>&, SmallLine*);
SmallLine* allocateSmallLine();
- void deallocateMediumLine(std::lock_guard<Mutex>&, MediumLine*);
+ void deallocateMediumLine(std::lock_guard<StaticMutex>&, MediumLine*);
MediumLine* allocateMediumLine();
void scavenge();
namespace bmalloc {
-static inline void sleep(std::unique_lock<Mutex>& lock, std::chrono::milliseconds duration)
+static inline void sleep(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds duration)
{
if (duration == std::chrono::milliseconds(0))
return;
lock.lock();
}
-Heap::Heap(std::lock_guard<Mutex>&)
+Heap::Heap(std::lock_guard<StaticMutex>&)
: m_isAllocatingPages(false)
, m_scavenger(*this, &Heap::concurrentScavenge)
{
void Heap::concurrentScavenge()
{
- std::unique_lock<Mutex> lock(PerProcess<Heap>::mutex());
+ std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
scavenge(lock, scavengeSleepDuration);
}
-void Heap::scavenge(std::unique_lock<Mutex>& lock, std::chrono::milliseconds sleepDuration)
+void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
scavengeXSmallPages(lock, sleepDuration);
scavengeSmallPages(lock, sleepDuration);
sleep(lock, sleepDuration);
}
-void Heap::scavengeSmallPages(std::unique_lock<Mutex>& lock, std::chrono::milliseconds sleepDuration)
+void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
while (1) {
if (m_isAllocatingPages) {
}
}
-void Heap::scavengeXSmallPages(std::unique_lock<Mutex>& lock, std::chrono::milliseconds sleepDuration)
+void Heap::scavengeXSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
while (1) {
if (m_isAllocatingPages) {
}
}
-void Heap::scavengeMediumPages(std::unique_lock<Mutex>& lock, std::chrono::milliseconds sleepDuration)
+void Heap::scavengeMediumPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
while (1) {
if (m_isAllocatingPages) {
}
}
-void Heap::scavengeLargeRanges(std::unique_lock<Mutex>& lock, std::chrono::milliseconds sleepDuration)
+void Heap::scavengeLargeRanges(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
while (1) {
if (m_isAllocatingPages) {
}
}
-XSmallLine* Heap::allocateXSmallLineSlowCase(std::lock_guard<Mutex>& lock)
+XSmallLine* Heap::allocateXSmallLineSlowCase(std::lock_guard<StaticMutex>& lock)
{
m_isAllocatingPages = true;
return line;
}
-SmallLine* Heap::allocateSmallLineSlowCase(std::lock_guard<Mutex>& lock)
+SmallLine* Heap::allocateSmallLineSlowCase(std::lock_guard<StaticMutex>& lock)
{
m_isAllocatingPages = true;
return line;
}
-MediumLine* Heap::allocateMediumLineSlowCase(std::lock_guard<Mutex>& lock)
+MediumLine* Heap::allocateMediumLineSlowCase(std::lock_guard<StaticMutex>& lock)
{
m_isAllocatingPages = true;
return line;
}
-void* Heap::allocateXLarge(std::lock_guard<Mutex>&, size_t size)
+void* Heap::allocateXLarge(std::lock_guard<StaticMutex>&, size_t size)
{
XLargeChunk* chunk = XLargeChunk::create(size);
return chunk->begin();
}
-void Heap::deallocateXLarge(std::lock_guard<Mutex>&, void* object)
+void Heap::deallocateXLarge(std::lock_guard<StaticMutex>&, void* object)
{
XLargeChunk* chunk = XLargeChunk::get(object);
XLargeChunk::destroy(chunk);
}
-void* Heap::allocateLarge(std::lock_guard<Mutex>&, size_t size)
+void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t size)
{
BASSERT(size <= largeMax);
BASSERT(size >= largeMin);
return range.begin();
}
-void Heap::deallocateLarge(std::lock_guard<Mutex>&, void* object)
+void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object)
{
Range range = BoundaryTag::deallocate(object);
m_largeRanges.insert(range);
class Heap {
public:
- Heap(std::lock_guard<Mutex>&);
+ Heap(std::lock_guard<StaticMutex>&);
- XSmallLine* allocateXSmallLine(std::lock_guard<Mutex>&);
- void deallocateXSmallLine(std::lock_guard<Mutex>&, XSmallLine*);
+ XSmallLine* allocateXSmallLine(std::lock_guard<StaticMutex>&);
+ void deallocateXSmallLine(std::lock_guard<StaticMutex>&, XSmallLine*);
- SmallLine* allocateSmallLine(std::lock_guard<Mutex>&);
- void deallocateSmallLine(std::lock_guard<Mutex>&, SmallLine*);
+ SmallLine* allocateSmallLine(std::lock_guard<StaticMutex>&);
+ void deallocateSmallLine(std::lock_guard<StaticMutex>&, SmallLine*);
- MediumLine* allocateMediumLine(std::lock_guard<Mutex>&);
- void deallocateMediumLine(std::lock_guard<Mutex>&, MediumLine*);
+ MediumLine* allocateMediumLine(std::lock_guard<StaticMutex>&);
+ void deallocateMediumLine(std::lock_guard<StaticMutex>&, MediumLine*);
- void* allocateLarge(std::lock_guard<Mutex>&, size_t);
- void deallocateLarge(std::lock_guard<Mutex>&, void*);
+ void* allocateLarge(std::lock_guard<StaticMutex>&, size_t);
+ void deallocateLarge(std::lock_guard<StaticMutex>&, void*);
- void* allocateXLarge(std::lock_guard<Mutex>&, size_t);
- void deallocateXLarge(std::lock_guard<Mutex>&, void*);
+ void* allocateXLarge(std::lock_guard<StaticMutex>&, size_t);
+ void deallocateXLarge(std::lock_guard<StaticMutex>&, void*);
- void scavenge(std::unique_lock<Mutex>&, std::chrono::milliseconds sleepDuration);
+ void scavenge(std::unique_lock<StaticMutex>&, std::chrono::milliseconds sleepDuration);
private:
~Heap() = delete;
- XSmallLine* allocateXSmallLineSlowCase(std::lock_guard<Mutex>&);
- SmallLine* allocateSmallLineSlowCase(std::lock_guard<Mutex>&);
- MediumLine* allocateMediumLineSlowCase(std::lock_guard<Mutex>&);
+ XSmallLine* allocateXSmallLineSlowCase(std::lock_guard<StaticMutex>&);
+ SmallLine* allocateSmallLineSlowCase(std::lock_guard<StaticMutex>&);
+ MediumLine* allocateMediumLineSlowCase(std::lock_guard<StaticMutex>&);
void* allocateLarge(Range, size_t);
Range allocateLargeChunk();
void mergeLargeRight(EndTag*&, BeginTag*&, Range&, bool& hasPhysicalPages);
void concurrentScavenge();
- void scavengeXSmallPages(std::unique_lock<Mutex>&, std::chrono::milliseconds);
- void scavengeSmallPages(std::unique_lock<Mutex>&, std::chrono::milliseconds);
- void scavengeMediumPages(std::unique_lock<Mutex>&, std::chrono::milliseconds);
- void scavengeLargeRanges(std::unique_lock<Mutex>&, std::chrono::milliseconds);
+ void scavengeXSmallPages(std::unique_lock<StaticMutex>&, std::chrono::milliseconds);
+ void scavengeSmallPages(std::unique_lock<StaticMutex>&, std::chrono::milliseconds);
+ void scavengeMediumPages(std::unique_lock<StaticMutex>&, std::chrono::milliseconds);
+ void scavengeLargeRanges(std::unique_lock<StaticMutex>&, std::chrono::milliseconds);
Vector<XSmallLine*> m_xSmallLines;
Vector<SmallLine*> m_smallLines;
AsyncTask<Heap, decltype(&Heap::concurrentScavenge)> m_scavenger;
};
-inline void Heap::deallocateXSmallLine(std::lock_guard<Mutex>& lock, XSmallLine* line)
+inline void Heap::deallocateXSmallLine(std::lock_guard<StaticMutex>& lock, XSmallLine* line)
{
XSmallPage* page = XSmallPage::get(line);
if (page->deref(lock)) {
m_xSmallLines.push(line);
}
-inline XSmallLine* Heap::allocateXSmallLine(std::lock_guard<Mutex>& lock)
+inline XSmallLine* Heap::allocateXSmallLine(std::lock_guard<StaticMutex>& lock)
{
while (m_xSmallLines.size()) {
XSmallLine* line = m_xSmallLines.pop();
return allocateXSmallLineSlowCase(lock);
}
-inline void Heap::deallocateSmallLine(std::lock_guard<Mutex>& lock, SmallLine* line)
+inline void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, SmallLine* line)
{
SmallPage* page = SmallPage::get(line);
if (page->deref(lock)) {
m_smallLines.push(line);
}
-inline SmallLine* Heap::allocateSmallLine(std::lock_guard<Mutex>& lock)
+inline SmallLine* Heap::allocateSmallLine(std::lock_guard<StaticMutex>& lock)
{
while (m_smallLines.size()) {
SmallLine* line = m_smallLines.pop();
return allocateSmallLineSlowCase(lock);
}
-inline void Heap::deallocateMediumLine(std::lock_guard<Mutex>& lock, MediumLine* line)
+inline void Heap::deallocateMediumLine(std::lock_guard<StaticMutex>& lock, MediumLine* line)
{
MediumPage* page = MediumPage::get(line);
if (page->deref(lock)) {
m_mediumLines.push(line);
}
-inline MediumLine* Heap::allocateMediumLine(std::lock_guard<Mutex>& lock)
+inline MediumLine* Heap::allocateMediumLine(std::lock_guard<StaticMutex>& lock)
{
while (m_mediumLines.size()) {
MediumLine* line = m_mediumLines.pop();
static Line* get(void*);
void concurrentRef(unsigned char = 1);
- bool deref(std::lock_guard<Mutex>&, unsigned char = 1);
+ bool deref(std::lock_guard<StaticMutex>&, unsigned char = 1);
char* begin();
char* end();
}
template<class Traits>
-inline bool Line<Traits>::deref(std::lock_guard<Mutex>&, unsigned char count)
+inline bool Line<Traits>::deref(std::lock_guard<StaticMutex>&, unsigned char count)
{
BASSERT(count <= m_refCount);
m_refCount -= count;
#ifndef Mutex_h
#define Mutex_h
-#include "BAssert.h"
-#include <atomic>
+#include "StaticMutex.h"
-// A replacement for std::mutex that does not require an exit-time destructor.
+// A fast replacement for std::mutex.
namespace bmalloc {
-struct Mutex {
+class Mutex : public StaticMutex {
public:
- void init();
-
- void lock();
- bool try_lock();
- void unlock();
-
-private:
- void lockSlowCase();
-
- std::atomic_flag m_flag;
+ Mutex();
};
-inline void Mutex::init()
-{
- m_flag.clear();
-}
-
-inline bool Mutex::try_lock()
-{
- return !m_flag.test_and_set(std::memory_order_acquire);
-}
-
-inline void Mutex::lock()
-{
- if (!try_lock())
- lockSlowCase();
-}
-
-inline void Mutex::unlock()
+inline Mutex::Mutex()
{
- m_flag.clear(std::memory_order_release);
+ init();
}
} // namespace bmalloc
static Page* get(Line*);
- void ref(std::lock_guard<Mutex>&);
- bool deref(std::lock_guard<Mutex>&);
- unsigned refCount(std::lock_guard<Mutex>&);
+ void ref(std::lock_guard<StaticMutex>&);
+ bool deref(std::lock_guard<StaticMutex>&);
+ unsigned refCount(std::lock_guard<StaticMutex>&);
Line* begin();
Line* end();
};
template<typename Traits>
-inline void Page<Traits>::ref(std::lock_guard<Mutex>&)
+inline void Page<Traits>::ref(std::lock_guard<StaticMutex>&)
{
BASSERT(m_refCount < maxRefCount);
++m_refCount;
}
template<typename Traits>
-inline bool Page<Traits>::deref(std::lock_guard<Mutex>&)
+inline bool Page<Traits>::deref(std::lock_guard<StaticMutex>&)
{
BASSERT(m_refCount);
--m_refCount;
}
template<typename Traits>
-inline unsigned Page<Traits>::refCount(std::lock_guard<Mutex>&)
+inline unsigned Page<Traits>::refCount(std::lock_guard<StaticMutex>&)
{
return m_refCount;
}
#define PerProcess_h
#include "Inline.h"
-#include "Mutex.h"
#include "Sizes.h"
+#include "StaticMutex.h"
#include <mutex>
namespace bmalloc {
// x = object->m_field; // OK
// if (gobalFlag) { ... } // Undefined behavior.
//
-// std::lock_guard<Mutex> lock(PerProcess<Object>::mutex());
+// std::lock_guard<StaticMutex> lock(PerProcess<Object>::mutex());
// Object* object = PerProcess<Object>::get(lock);
// if (gobalFlag) { ... } // OK.
static T* get();
static T* getFastCase();
- static Mutex& mutex() { return s_mutex; }
+ static StaticMutex& mutex() { return s_mutex; }
private:
static T* getSlowCase();
static std::atomic<T*> s_object;
- static Mutex s_mutex;
+ static StaticMutex s_mutex;
typedef typename std::aligned_storage<sizeof(T), std::alignment_of<T>::value>::type Memory;
static Memory s_memory;
template<typename T>
NO_INLINE T* PerProcess<T>::getSlowCase()
{
- std::lock_guard<Mutex> lock(s_mutex);
+ std::lock_guard<StaticMutex> lock(s_mutex);
if (!s_object.load(std::memory_order_consume)) {
T* t = new (&s_memory) T(lock);
s_object.store(t, std::memory_order_release);
std::atomic<T*> PerProcess<T>::s_object;
template<typename T>
-Mutex PerProcess<T>::s_mutex;
+StaticMutex PerProcess<T>::s_mutex;
template<typename T>
typename PerProcess<T>::Memory PerProcess<T>::s_memory;
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include "Mutex.h"
+#include "StaticMutex.h"
#include <thread>
namespace bmalloc {
-void Mutex::lockSlowCase()
+void StaticMutex::lockSlowCase()
{
while (!try_lock())
std::this_thread::yield();
--- /dev/null
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef StaticMutex_h
+#define StaticMutex_h
+
+#include "BAssert.h"
+#include <atomic>
+
+// A fast replacement for std::mutex for use in static storage, where global
+// constructors and exit-time destructors are prohibited.
+
+namespace bmalloc {
+
+class StaticMutex {
+public:
+ void lock();
+ bool try_lock();
+ void unlock();
+
+private:
+ friend class Mutex;
+
+ // Static storage will zero-initialize us automatically, but Mutex needs an
+ // API for explicit initialization.
+ void init();
+
+ void lockSlowCase();
+
+ std::atomic_flag m_flag;
+};
+
+inline void StaticMutex::init()
+{
+ m_flag.clear();
+}
+
+inline bool StaticMutex::try_lock()
+{
+ return !m_flag.test_and_set(std::memory_order_acquire);
+}
+
+inline void StaticMutex::lock()
+{
+ if (!try_lock())
+ lockSlowCase();
+}
+
+inline void StaticMutex::unlock()
+{
+ m_flag.clear(std::memory_order_release);
+}
+
+} // namespace bmalloc
+
+#endif // StaticMutex_h
MediumPage* allocateMediumPage();
Range allocateLargeRange(size_t);
- void deallocateXSmallPage(std::unique_lock<Mutex>&, XSmallPage*);
- void deallocateSmallPage(std::unique_lock<Mutex>&, SmallPage*);
- void deallocateMediumPage(std::unique_lock<Mutex>&, MediumPage*);
- void deallocateLargeRange(std::unique_lock<Mutex>&, Range);
+ void deallocateXSmallPage(std::unique_lock<StaticMutex>&, XSmallPage*);
+ void deallocateSmallPage(std::unique_lock<StaticMutex>&, SmallPage*);
+ void deallocateMediumPage(std::unique_lock<StaticMutex>&, MediumPage*);
+ void deallocateLargeRange(std::unique_lock<StaticMutex>&, Range);
private:
void allocateXSmallChunk();
return range;
}
-inline void VMHeap::deallocateXSmallPage(std::unique_lock<Mutex>& lock, XSmallPage* page)
+inline void VMHeap::deallocateXSmallPage(std::unique_lock<StaticMutex>& lock, XSmallPage* page)
{
lock.unlock();
vmDeallocatePhysicalPages(page->begin()->begin(), vmPageSize);
m_xSmallPages.push(page);
}
-inline void VMHeap::deallocateSmallPage(std::unique_lock<Mutex>& lock, SmallPage* page)
+inline void VMHeap::deallocateSmallPage(std::unique_lock<StaticMutex>& lock, SmallPage* page)
{
lock.unlock();
vmDeallocatePhysicalPages(page->begin()->begin(), vmPageSize);
m_smallPages.push(page);
}
-inline void VMHeap::deallocateMediumPage(std::unique_lock<Mutex>& lock, MediumPage* page)
+inline void VMHeap::deallocateMediumPage(std::unique_lock<StaticMutex>& lock, MediumPage* page)
{
lock.unlock();
vmDeallocatePhysicalPages(page->begin()->begin(), vmPageSize);
m_mediumPages.push(page);
}
-inline void VMHeap::deallocateLargeRange(std::unique_lock<Mutex>& lock, Range range)
+inline void VMHeap::deallocateLargeRange(std::unique_lock<StaticMutex>& lock, Range range)
{
BeginTag* beginTag = LargeChunk::beginTag(range.begin());
EndTag* endTag = LargeChunk::endTag(range.begin(), range.size());
{
PerThread<Cache>::get()->scavenge();
- std::unique_lock<Mutex> lock(PerProcess<Heap>::mutex());
+ std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
PerProcess<Heap>::get()->scavenge(lock, std::chrono::milliseconds(0));
}