Release assert in com.apple.WebKit.WebContent under JavaScriptCore: JSC::JSONProtoFuncStringify
author     ggaren@apple.com <ggaren@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
           Thu, 7 May 2015 23:29:15 +0000 (23:29 +0000)
committer  ggaren@apple.com <ggaren@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
           Thu, 7 May 2015 23:29:15 +0000 (23:29 +0000)
https://bugs.webkit.org/show_bug.cgi?id=144758

Reviewed by Andreas Kling.

This was an out-of-memory error when trying to shrink a string builder.
bmalloc was missing the optimization that allowed realloc() to shrink
without copying. So, let's add it.
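
In outline, the old path and the new fast path (a simplified sketch of
the patch below, not the literal code):

    // Old: always allocate + copy + free, so even a shrink could fail
    // under memory pressure; this allocation is what triggered the OOM.
    void* result = allocate(newSize);
    memcpy(result, object, std::min(oldSize, newSize));
    m_deallocator.deallocate(object);
    return result;

    // New: when newSize < oldSize and the object stays in the same size
    // class, trim the tail in place and return the original pointer.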

* bmalloc/Allocator.cpp:
(bmalloc::Allocator::reallocate): Added Large and XLarge cases for
shrinking without copying. This isn't possible for small and medium
objects, and probably not very profitable, either.
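
Condensed, the new Large case looks like this (taken from the patch
below; mediumMax, largeMin, and largeAlignment are bmalloc's existing
size-class constants):

    case Large: {
        std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
        LargeObject largeObject(object);
        oldSize = largeObject.size();

        if (newSize < oldSize && newSize > mediumMax) {  // still Large-sized
            newSize = roundUpToMultipleOf<largeAlignment>(newSize);
            if (oldSize - newSize >= largeMin) {         // tail big enough to free
                auto split = largeObject.split(newSize);
                lock.unlock();  // deallocate() may need the heap lock itself
                m_deallocator.deallocate(split.second.begin());
                lock.lock();
            }
            return object;  // same pointer, nothing copied
        }
        break;
    }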

* bmalloc/Heap.cpp:
(bmalloc::Heap::findXLarge):
(bmalloc::Heap::deallocateXLarge):
* bmalloc/Heap.h: Refactored this code to return a reference to an
XLarge range. This makes the code reusable, and also makes it easier
for realloc() to update metadata.
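
With the reference return, the XLarge shrink path in reallocate() can
update the heap's record directly (condensed from the patch below):

    Range& range = PerProcess<Heap>::getFastCase()->findXLarge(lock, object);
    oldSize = range.size();

    // ... unmap the tail pages with vmDeallocate() ...

    range = Range(object, newSize);  // new size lands in the heap's metadata

Since every caller now expects the object to be present, findXLarge()
asserts instead of returning an empty Range.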

* bmalloc/LargeObject.h:
(bmalloc::LargeObject::split): Allow allocated objects to split because
that's what realloc() wants to do, and there's nothing intrinsically
wrong with it.
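
split() divides one boundary-tagged object into two adjacent ones at the
requested size; realloc() keeps the first and frees the second (usage as
in the patch below):

    // [begin, begin + newSize)        -> split.first, kept by the caller
    // [begin + newSize, begin + size) -> split.second, the leftover tail
    std::pair<LargeObject, LargeObject> split = largeObject.split(newSize);
    m_deallocator.deallocate(split.second.begin());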

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@183959 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Source/bmalloc/ChangeLog
Source/bmalloc/bmalloc/Allocator.cpp
Source/bmalloc/bmalloc/Deallocator.cpp
Source/bmalloc/bmalloc/Heap.cpp
Source/bmalloc/bmalloc/Heap.h
Source/bmalloc/bmalloc/LargeObject.h

diff --git a/Source/bmalloc/ChangeLog b/Source/bmalloc/ChangeLog
index b2308d2..7e1948a 100644
@@ -1,3 +1,31 @@
+2015-05-07  Geoffrey Garen  <ggaren@apple.com>
+
+        Release assert in com.apple.WebKit.WebContent under JavaScriptCore: JSC::JSONProtoFuncStringify
+        https://bugs.webkit.org/show_bug.cgi?id=144758
+
+        Reviewed by Andreas Kling.
+
+        This was an out-of-memory error when trying to shrink a string builder.
+        bmalloc was missing the optimization that allowed realloc() to shrink
+        without copying. So, let's add it.
+
+        * bmalloc/Allocator.cpp:
+        (bmalloc::Allocator::reallocate): Added Large and XLarge cases for
+        shrinking without copying. This isn't possible for small and medium
+        objects, and probably not very profitable, either.
+
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::findXLarge):
+        (bmalloc::Heap::deallocateXLarge):
+        * bmalloc/Heap.h: Refactored this code to return a reference to an
+        XLarge range. This makes the code reusable, and also makes it easier
+        for realloc() to update metadata.
+
+        * bmalloc/LargeObject.h:
+        (bmalloc::LargeObject::split): Allow allocated objects to split because
+        that's what realloc() wants to do, and there's nothing intrinsically
+        wrong with it.
+
 2015-05-07  Dan Bernstein  <mitz@apple.com>
 
         <rdar://problem/19317140> [Xcode] Remove usage of AspenFamily.xcconfig in Source/
diff --git a/Source/bmalloc/bmalloc/Allocator.cpp b/Source/bmalloc/bmalloc/Allocator.cpp
index b1e70ba..d86118a 100644
@@ -112,10 +112,6 @@ void* Allocator::reallocate(void* object, size_t newSize)
     if (!m_isBmallocEnabled)
         return realloc(object, newSize);
 
-    void* result = allocate(newSize);
-    if (!object)
-        return result;
-
     size_t oldSize = 0;
     switch (objectType(object)) {
     case Small: {
@@ -129,20 +125,48 @@ void* Allocator::reallocate(void* object, size_t newSize)
         break;
     }
     case Large: {
-        std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+        std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
         LargeObject largeObject(object);
         oldSize = largeObject.size();
+
+        if (newSize < oldSize && newSize > mediumMax) {
+            newSize = roundUpToMultipleOf<largeAlignment>(newSize);
+            if (oldSize - newSize >= largeMin) {
+                std::pair<LargeObject, LargeObject> split = largeObject.split(newSize);
+                
+                lock.unlock();
+                m_deallocator.deallocate(split.second.begin());
+                lock.lock();
+            }
+            return object;
+        }
         break;
     }
     case XLarge: {
-        std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
-        Range range = PerProcess<Heap>::getFastCase()->findXLarge(lock, object);
-        RELEASE_BASSERT(range);
+        BASSERT(objectType(nullptr) == XLarge);
+        if (!object)
+            break;
+
+        std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
+        Range& range = PerProcess<Heap>::getFastCase()->findXLarge(lock, object);
         oldSize = range.size();
+
+        if (newSize < oldSize && newSize > largeMax) {
+            newSize = roundUpToMultipleOf<xLargeAlignment>(newSize);
+            if (oldSize - newSize >= xLargeAlignment) {
+                lock.unlock();
+                vmDeallocate(static_cast<char*>(object) + newSize, oldSize - newSize);
+                lock.lock();
+
+                range = Range(object, newSize);
+            }
+            return object;
+        }
         break;
     }
     }
 
+    void* result = allocate(newSize);
     size_t copySize = std::min(oldSize, newSize);
     memcpy(result, object, copySize);
     m_deallocator.deallocate(object);
diff --git a/Source/bmalloc/bmalloc/Deallocator.cpp b/Source/bmalloc/bmalloc/Deallocator.cpp
index 393b677..850e012 100644
@@ -99,6 +99,7 @@ void Deallocator::deallocateSlowCase(void* object)
         return;
     }
 
+    BASSERT(objectType(nullptr) == XLarge);
     if (!object)
         return;
 
diff --git a/Source/bmalloc/bmalloc/Heap.cpp b/Source/bmalloc/bmalloc/Heap.cpp
index ae7d709..383ccd2 100644
@@ -306,7 +306,7 @@ void* Heap::tryAllocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, s
     return result;
 }
 
-Range Heap::findXLarge(std::lock_guard<StaticMutex>&, void* object)
+Range& Heap::findXLarge(std::unique_lock<StaticMutex>&, void* object)
 {
     for (auto& range : m_xLargeObjects) {
         if (range.begin() != object)
@@ -314,23 +314,17 @@ Range Heap::findXLarge(std::lock_guard<StaticMutex>&, void* object)
         return range;
     }
 
-    return Range();
+    RELEASE_BASSERT(false);
+    return *static_cast<Range*>(nullptr); // Silence compiler error.
 }
 
 void Heap::deallocateXLarge(std::unique_lock<StaticMutex>& lock, void* object)
 {
-    for (auto& range : m_xLargeObjects) {
-        if (range.begin() != object)
-            continue;
-
-        Range toDeallocate = m_xLargeObjects.pop(&range);
+    Range toDeallocate = m_xLargeObjects.pop(&findXLarge(lock, object));
 
-        lock.unlock();
-        vmDeallocate(toDeallocate.begin(), toDeallocate.size());
-        lock.lock();
-        
-        break;
-    }
+    lock.unlock();
+    vmDeallocate(toDeallocate.begin(), toDeallocate.size());
+    lock.lock();
 }
 
 void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, LargeObject& largeObject, size_t size)
diff --git a/Source/bmalloc/bmalloc/Heap.h b/Source/bmalloc/bmalloc/Heap.h
index 3f61462..c249004 100644
@@ -66,7 +66,7 @@ public:
     void* allocateXLarge(std::lock_guard<StaticMutex>&, size_t);
     void* allocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t);
     void* tryAllocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t);
-    Range findXLarge(std::lock_guard<StaticMutex>&, void*);
+    Range& findXLarge(std::unique_lock<StaticMutex>&, void*);
     void deallocateXLarge(std::unique_lock<StaticMutex>&, void*);
 
     void scavenge(std::unique_lock<StaticMutex>&, std::chrono::milliseconds sleepDuration);
diff --git a/Source/bmalloc/bmalloc/LargeObject.h b/Source/bmalloc/bmalloc/LargeObject.h
index e94f21f..e3400a0 100644
@@ -206,8 +206,6 @@ inline LargeObject LargeObject::merge() const
 
 inline std::pair<LargeObject, LargeObject> LargeObject::split(size_t size) const
 {
-    BASSERT(isFree());
-
     Range split(begin(), size);
     Range leftover = Range(split.end(), this->size() - size);
     BASSERT(leftover.size() >= largeMin);