[bmalloc] Follow-up and bug fixes after r244481
author ysuzuki@apple.com <ysuzuki@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Thu, 25 Apr 2019 21:52:59 +0000 (21:52 +0000)
committer ysuzuki@apple.com <ysuzuki@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Thu, 25 Apr 2019 21:52:59 +0000 (21:52 +0000)
https://bugs.webkit.org/show_bug.cgi?id=197294

Reviewed by Saam Barati.

This patch includes follow-up work after r244481 and fixes bugs that were introduced in that refactoring.

* bmalloc/IsoAllocator.h: Remove unused function.
* bmalloc/IsoAllocatorInlines.h:
(bmalloc::IsoAllocator<Config>::allocateSlow):
* bmalloc/IsoDeallocatorInlines.h:
(bmalloc::IsoDeallocator<Config>::deallocate):
* bmalloc/IsoHeapImpl.h: Rename m_usableBits to m_availableShared and add a static_assert.
* bmalloc/IsoHeapImplInlines.h: Do not clear m_numberOfAllocationsFromSharedInOneCycle etc. in scavenge, since IsoHeapImpl::scavenge
is unrelated to the thread-local IsoAllocator's status.
(bmalloc::IsoHeapImpl<Config>::scavenge):
(bmalloc::IsoHeapImpl<Config>::forEachLiveObject):
(bmalloc::IsoHeapImpl<Config>::updateAllocationMode): Update m_allocationMode correctly.
(bmalloc::IsoHeapImpl<Config>::allocateFromShared):
* bmalloc/IsoSharedHeapInlines.h:
(bmalloc::computeObjectSizeForSharedCell):
(bmalloc::IsoSharedHeap::allocateNew):
(bmalloc::IsoSharedHeap::allocateSlow): Add computeObjectSizeForSharedCell.
* bmalloc/IsoSharedPage.h:
* bmalloc/IsoSharedPageInlines.h:
(bmalloc::IsoSharedPage::free): Take `const std::lock_guard<Mutex>&` as a parameter (see the sketch below).

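The `const std::lock_guard<Mutex>&` parameters threaded through allocateFromShared and IsoSharedPage::free follow the lock-witness idiom: a function that must run under a lock takes an otherwise-unused reference to the guard, so it cannot be called unless the caller already holds the lock. A minimal sketch of the idiom, with illustrative names (Mutex and SharedHeap are stand-ins here, not the bmalloc types):

    #include <mutex>

    using Mutex = std::mutex; // stand-in for bmalloc::Mutex

    class SharedHeap {
    public:
        void* allocate()
        {
            std::lock_guard<Mutex> locker(m_lock);
            return allocateSlow(locker); // the guard doubles as proof of locking
        }

    private:
        // Taking the guard by const reference documents, and enforces at every
        // call site, that m_lock is already held; the parameter is never read.
        void* allocateSlow(const std::lock_guard<Mutex>&) { return nullptr; }

        Mutex m_lock;
    };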
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@244666 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Source/bmalloc/ChangeLog
Source/bmalloc/bmalloc/IsoAllocator.h
Source/bmalloc/bmalloc/IsoAllocatorInlines.h
Source/bmalloc/bmalloc/IsoDeallocatorInlines.h
Source/bmalloc/bmalloc/IsoHeapImpl.h
Source/bmalloc/bmalloc/IsoHeapImplInlines.h
Source/bmalloc/bmalloc/IsoSharedHeapInlines.h
Source/bmalloc/bmalloc/IsoSharedPage.h
Source/bmalloc/bmalloc/IsoSharedPageInlines.h

index b93b1f1..e4fdfc7 100644 (file)
@@ -1,3 +1,32 @@
+2019-04-25  Yusuke Suzuki  <ysuzuki@apple.com>
+
+        [bmalloc] Follow-up and bug fixes after r244481
+        https://bugs.webkit.org/show_bug.cgi?id=197294
+
+        Reviewed by Saam Barati.
+
+        This patch includes follow-up work after r244481 and fixes bugs that were introduced in that refactoring.
+
+        * bmalloc/IsoAllocator.h: Remove unused function.
+        * bmalloc/IsoAllocatorInlines.h:
+        (bmalloc::IsoAllocator<Config>::allocateSlow):
+        * bmalloc/IsoDeallocatorInlines.h:
+        (bmalloc::IsoDeallocator<Config>::deallocate):
+        * bmalloc/IsoHeapImpl.h: Rename m_usableBits to m_availableShared and add a static_assert.
+        * bmalloc/IsoHeapImplInlines.h: Do not clear m_numberOfAllocationsFromSharedInOneCycle etc. in scavenge, since IsoHeapImpl::scavenge
+        is unrelated to the thread-local IsoAllocator's status.
+        (bmalloc::IsoHeapImpl<Config>::scavenge):
+        (bmalloc::IsoHeapImpl<Config>::forEachLiveObject):
+        (bmalloc::IsoHeapImpl<Config>::updateAllocationMode): Update m_allocationMode correctly.
+        (bmalloc::IsoHeapImpl<Config>::allocateFromShared):
+        * bmalloc/IsoSharedHeapInlines.h:
+        (bmalloc::computeObjectSizeForSharedCell):
+        (bmalloc::IsoSharedHeap::allocateNew):
+        (bmalloc::IsoSharedHeap::allocateSlow): Add computeObjectSizeForSharedCell.
+        * bmalloc/IsoSharedPage.h:
+        * bmalloc/IsoSharedPageInlines.h:
+        (bmalloc::IsoSharedPage::free): Take `const std::lock_guard<Mutex>&` as a parameter.
+
 2019-04-25  Alex Christensen  <achristensen@webkit.org>
 
         Start using C++17
index 30472c1..a0f7080 100644 (file)
@@ -40,8 +40,6 @@ public:
     IsoAllocator(IsoHeapImpl<Config>&);
     ~IsoAllocator();
     
-    AllocationMode considerUsingSharedAllocation();
-
     void* allocate(bool abortOnFailure);
     void scavenge();
     
index 606b4b8..5455ffd 100644 (file)
@@ -69,7 +69,7 @@ BNO_INLINE void* IsoAllocator<Config>::allocateSlow(bool abortOnFailure)
             m_currentPage = nullptr;
             m_freeList.clear();
         }
-        return m_heap->allocateFromShared(abortOnFailure);
+        return m_heap->allocateFromShared(locker, abortOnFailure);
     }
 
     BASSERT(allocationMode == AllocationMode::Fast);
index 7103ecb..23f32a3 100644 (file)
@@ -60,7 +60,7 @@ void IsoDeallocator<Config>::deallocate(api::IsoHeap<Type>& handle, void* ptr)
     IsoPageBase* page = IsoPageBase::pageFor(ptr);
     if (page->isShared()) {
         std::lock_guard<Mutex> locker(*m_lock);
-        static_cast<IsoSharedPage*>(page)->free<Config>(handle, ptr);
+        static_cast<IsoSharedPage*>(page)->free<Config>(locker, handle, ptr);
         return;
     }
 
index a90197a..9ed3cc5 100644 (file)
@@ -60,11 +60,13 @@ protected:
     friend class AllIsoHeaps;
     
     IsoHeapImplBase* m_next { nullptr };
-    std::chrono::steady_clock::time_point m_slowPathTimePoint;
+    std::chrono::steady_clock::time_point m_lastSlowPathTime;
     std::array<void*, maxAllocationFromShared> m_sharedCells { };
     unsigned m_numberOfAllocationsFromSharedInOneCycle { 0 };
-    unsigned m_usableBits { maxAllocationFromSharedMask };
+    unsigned m_availableShared { maxAllocationFromSharedMask };
     AllocationMode m_allocationMode { AllocationMode::Init };
+    
+    static_assert(sizeof(m_availableShared) * 8 >= maxAllocationFromShared, "");
 };
 
 template<typename Config>
@@ -111,7 +113,7 @@ public:
     void isNoLongerFreeable(void* ptr, size_t bytes);
 
     AllocationMode updateAllocationMode();
-    void* allocateFromShared(bool abortOnFailure);
+    void* allocateFromShared(const std::lock_guard<Mutex>&, bool abortOnFailure);
     
     // It's almost always the caller's responsibility to grab the lock. This lock comes from the
     // PerProcess<IsoTLSDeallocatorEntry<Config>>::get()->lock. That's pretty weird, and we don't
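The new static_assert pins down the invariant behind the rename: m_availableShared is a bitmask with one bit per shared cell, so its type must provide at least maxAllocationFromShared bits. A stand-alone illustration of the check (the slot count here is an assumption, not necessarily bmalloc's value):

    #include <climits>

    constexpr unsigned maxAllocationFromShared = 8; // illustrative slot count

    struct HeapImplBase {
        unsigned m_availableShared { (1U << maxAllocationFromShared) - 1 };
        // One bit per shared cell must fit into the bitmask's type.
        static_assert(sizeof(m_availableShared) * CHAR_BIT >= maxAllocationFromShared, "");
    };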
index f650280..ba41093 100644 (file)
@@ -109,8 +109,6 @@ void IsoHeapImpl<Config>::scavenge(Vector<DeferredDecommit>& decommits)
             directory.scavenge(decommits);
         });
     m_directoryHighWatermark = 0;
-    m_numberOfAllocationsFromSharedInOneCycle = 0;
-    m_allocationMode = AllocationMode::Init;
 }
 
 template<typename Config>
@@ -182,7 +180,7 @@ void IsoHeapImpl<Config>::forEachLiveObject(const Func& func)
         });
     for (unsigned index = 0; index < maxAllocationFromShared; ++index) {
         void* pointer = m_sharedCells[index];
-        if (pointer && !(m_usableBits & (1U << index)))
+        if (pointer && !(m_availableShared & (1U << index)))
             func(pointer);
     }
 }
@@ -233,55 +231,60 @@ void IsoHeapImpl<Config>::isNoLongerFreeable(void* ptr, size_t bytes)
 template<typename Config>
 AllocationMode IsoHeapImpl<Config>::updateAllocationMode()
 {
-    // Exhaust shared free cells, which means we should start activating the fast allocation mode for this type.
-    if (!m_usableBits) {
-        m_slowPathTimePoint = std::chrono::steady_clock::now();
-        return AllocationMode::Fast;
-    }
+    auto getNewAllocationMode = [&] {
+        // Shared free cells are exhausted, which means we should switch to the fast allocation mode for this type.
+        if (!m_availableShared) {
+            m_lastSlowPathTime = std::chrono::steady_clock::now();
+            return AllocationMode::Fast;
+        }
 
-    switch (m_allocationMode) {
-    case AllocationMode::Shared:
-        // Currently in the shared allocation mode. Until we exhaust shared free cells, continue using the shared allocation mode.
-        // But if we allocate so many shared cells within very short period, we should use the fast allocation mode instead.
-        // This avoids the following pathological case.
-        //
-        //     for (int i = 0; i < 1e6; ++i) {
-        //         auto* ptr = allocate();
-        //         ...
-        //         free(ptr);
-        //     }
-        if (m_numberOfAllocationsFromSharedInOneCycle <= IsoPage<Config>::numObjects)
-            return AllocationMode::Shared;
-        BFALLTHROUGH;
+        switch (m_allocationMode) {
+        case AllocationMode::Shared:
+            // Currently in the shared allocation mode. Until we exhaust shared free cells, continue using the shared allocation mode.
+            // But if we allocate many shared cells within a very short period, we should use the fast allocation mode instead.
+            // This avoids the following pathological case.
+            //
+            //     for (int i = 0; i < 1e6; ++i) {
+            //         auto* ptr = allocate();
+            //         ...
+            //         free(ptr);
+            //     }
+            if (m_numberOfAllocationsFromSharedInOneCycle <= IsoPage<Config>::numObjects)
+                return AllocationMode::Shared;
+            BFALLTHROUGH;
 
-    case AllocationMode::Fast: {
-        // The allocation pattern may change. We should check the allocation rate and decide which mode is more appropriate.
-        // If we don't go to the allocation slow path during 1~ seconds, we think the allocation becomes quiescent state.
-        auto now = std::chrono::steady_clock::now();
-        if ((now - m_slowPathTimePoint) < std::chrono::seconds(1)) {
-            m_slowPathTimePoint = now;
-            return AllocationMode::Fast;
+        case AllocationMode::Fast: {
+            // The allocation pattern may change. We should check the allocation rate and decide which mode is more appropriate.
+            // If we don't go to the allocation slow path for ~1 second, we assume allocation has become quiescent.
+            auto now = std::chrono::steady_clock::now();
+            if ((now - m_lastSlowPathTime) < std::chrono::seconds(1)) {
+                m_lastSlowPathTime = now;
+                return AllocationMode::Fast;
+            }
+
+            m_numberOfAllocationsFromSharedInOneCycle = 0;
+            m_lastSlowPathTime = now;
+            return AllocationMode::Shared;
         }
 
-        m_numberOfAllocationsFromSharedInOneCycle = 0;
-        m_slowPathTimePoint = now;
-        return AllocationMode::Shared;
-    }
+        case AllocationMode::Init:
+            m_lastSlowPathTime = std::chrono::steady_clock::now();
+            return AllocationMode::Shared;
+        }
 
-    case AllocationMode::Init:
-        m_slowPathTimePoint = std::chrono::steady_clock::now();
         return AllocationMode::Shared;
-    }
-
-    return AllocationMode::Shared;
+    };
+    AllocationMode allocationMode = getNewAllocationMode();
+    m_allocationMode = allocationMode;
+    return allocationMode;
 }
 
 template<typename Config>
-void* IsoHeapImpl<Config>::allocateFromShared(bool abortOnFailure)
+void* IsoHeapImpl<Config>::allocateFromShared(const std::lock_guard<Mutex>&, bool abortOnFailure)
 {
     static constexpr bool verbose = false;
 
-    unsigned indexPlusOne = __builtin_ffs(m_usableBits);
+    unsigned indexPlusOne = __builtin_ffs(m_availableShared);
     BASSERT(indexPlusOne);
     unsigned index = indexPlusOne - 1;
     void* result = m_sharedCells[index];
@@ -300,7 +303,7 @@ void* IsoHeapImpl<Config>::allocateFromShared(bool abortOnFailure)
         m_sharedCells[index] = result;
     }
     BASSERT(result);
-    m_usableBits &= (~(1U << index));
+    m_availableShared &= ~(1U << index);
     ++m_numberOfAllocationsFromSharedInOneCycle;
     return result;
 }
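In allocateFromShared, m_availableShared acts as a free-slot bitmask: __builtin_ffs returns the 1-based position of the lowest set bit and 0 when the mask is empty, so a zero mask means the shared cells are exhausted and updateAllocationMode switches to AllocationMode::Fast. A self-contained sketch of the same bookkeeping, with illustrative names and slot count:

    #include <array>
    #include <cassert>

    constexpr unsigned maxSlots = 8; // stand-in for maxAllocationFromShared

    struct SharedCells {
        unsigned available { (1U << maxSlots) - 1 }; // every slot starts free
        std::array<void*, maxSlots> cells { };

        int acquire()
        {
            int indexPlusOne = __builtin_ffs(static_cast<int>(available));
            if (!indexPlusOne)
                return -1; // exhausted: bmalloc would switch to the fast mode here
            int index = indexPlusOne - 1;
            available &= ~(1U << index); // mark the slot as in use
            return index;
        }

        void release(int index)
        {
            assert(!(available & (1U << index))); // the slot must be in use
            available |= 1U << index; // mirrors IsoSharedPage::free
        }
    };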
index 3e6356a..d9f7f11 100644 (file)
@@ -43,11 +43,16 @@ void* VariadicBumpAllocator::allocate(const Func& slowPath)
     return slowPath();
 }
 
+inline constexpr unsigned computeObjectSizeForSharedCell(unsigned objectSize)
+{
+    return roundUpToMultipleOf<alignmentForIsoSharedAllocation>(static_cast<uintptr_t>(objectSize));
+}
+
 template<unsigned passedObjectSize>
 void* IsoSharedHeap::allocateNew(bool abortOnFailure)
 {
     std::lock_guard<Mutex> locker(mutex());
-    constexpr unsigned objectSize = roundUpToMultipleOf<alignmentForIsoSharedAllocation>(static_cast<uintptr_t>(passedObjectSize));
+    constexpr unsigned objectSize = computeObjectSizeForSharedCell(passedObjectSize);
     return m_allocator.template allocate<objectSize>(
         [&] () -> void* {
             return allocateSlow<passedObjectSize>(abortOnFailure);
@@ -73,7 +78,7 @@ BNO_INLINE void* IsoSharedHeap::allocateSlow(bool abortOnFailure)
     m_currentPage = page;
     m_allocator = m_currentPage->startAllocating();
 
-    constexpr unsigned objectSize = roundUpToMultipleOf<alignmentForIsoSharedAllocation>(static_cast<uintptr_t>(passedObjectSize));
+    constexpr unsigned objectSize = computeObjectSizeForSharedCell(passedObjectSize);
     return m_allocator.allocate<objectSize>([] () { BCRASH(); return nullptr; });
 }
 
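computeObjectSizeForSharedCell centralizes the rounding that allocateNew and allocateSlow previously duplicated. roundUpToMultipleOf rounds up to a power-of-two boundary; a stand-alone equivalent (the alignment value is an assumption, alignmentForIsoSharedAllocation may differ):

    #include <cstdint>

    constexpr uintptr_t alignment = 8; // stand-in for alignmentForIsoSharedAllocation

    // Power-of-two round-up: add (alignment - 1), then clear the low bits.
    inline constexpr unsigned computeObjectSizeForSharedCell(unsigned objectSize)
    {
        return static_cast<unsigned>((objectSize + alignment - 1) & ~(alignment - 1));
    }

    static_assert(computeObjectSizeForSharedCell(1) == 8, "");
    static_assert(computeObjectSizeForSharedCell(8) == 8, "");
    static_assert(computeObjectSizeForSharedCell(9) == 16, "");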
index 224ef20..a9e137e 100644 (file)
@@ -38,7 +38,7 @@ public:
     BEXPORT static IsoSharedPage* tryCreate();
 
     template<typename Config, typename Type>
-    void free(api::IsoHeap<Type>&, void*);
+    void free(const std::lock_guard<Mutex>&, api::IsoHeap<Type>&, void*);
     VariadicBumpAllocator startAllocating();
     void stopAllocating();
 
index f5730df..c4216b5 100644 (file)
@@ -35,7 +35,7 @@ namespace bmalloc {
 // This is because an empty IsoSharedPage is still split into various different objects that should keep some part of the virtual memory region dedicated.
 // We cannot set up bump allocation for such a page. Not freeing IsoSharedPages is OK since IsoSharedPage is only used for the lower tier of IsoHeap.
 template<typename Config, typename Type>
-void IsoSharedPage::free(api::IsoHeap<Type>& handle, void* ptr)
+void IsoSharedPage::free(const std::lock_guard<Mutex>&, api::IsoHeap<Type>& handle, void* ptr)
 {
     auto& heapImpl = handle.impl();
     uint8_t index = *indexSlotFor<Config>(ptr) & IsoHeapImplBase::maxAllocationFromSharedMask;
@@ -43,7 +43,7 @@ void IsoSharedPage::free(api::IsoHeap<Type>& handle, void* ptr)
     // If vptr is replaced to the other vptr, we may accidentally chain this pointer to the incorrect HeapImplBase, which totally breaks the IsoHeap's goal.
     // To harden that, we validate that this pointer is actually allocated for a specific HeapImplBase here by checking whether this pointer is listed in HeapImplBase's shared cells.
     RELEASE_BASSERT(heapImpl.m_sharedCells[index] == ptr);
-    heapImpl.m_usableBits |= (1U << index);
+    heapImpl.m_availableShared |= (1U << index);
 }
 
 inline VariadicBumpAllocator IsoSharedPage::startAllocating()
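The hardening in IsoSharedPage::free masks the index read from the per-object slot to the valid range, then verifies the pointer against the owning heap's shared-cell table, so a forged vptr cannot release a cell belonging to a different heap. A simplified sketch of that check (types, constants, and abort() stand in for the bmalloc originals):

    #include <array>
    #include <cstdint>
    #include <cstdlib>

    constexpr unsigned maxSlots = 8;            // illustrative slot count
    constexpr uint8_t indexMask = maxSlots - 1; // valid since maxSlots is a power of two

    struct HeapImplBase {
        std::array<void*, maxSlots> sharedCells { };
        unsigned availableShared { 0 };
    };

    void freeShared(HeapImplBase& heap, void* ptr, uint8_t storedIndex)
    {
        uint8_t index = storedIndex & indexMask; // can never address outside the table
        // The pointer must be the cell this heap handed out at that index;
        // otherwise crash (standing in for RELEASE_BASSERT) instead of corrupting state.
        if (heap.sharedCells[index] != ptr)
            abort();
        heap.availableShared |= 1U << index; // the cell becomes allocatable again
    }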