BASSERTs added in r196421 are causing debug test failures
authormsaboff@apple.com <msaboff@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Sat, 13 Feb 2016 01:10:22 +0000 (01:10 +0000)
committermsaboff@apple.com <msaboff@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Sat, 13 Feb 2016 01:10:22 +0000 (01:10 +0000)
https://bugs.webkit.org/show_bug.cgi?id=154113

Reviewed by Geoffrey Garen.

In VMHeap::deallocateLargeObject(), we drop the lock to deallocate the physical pages.
If the scavenger thread is running at the same time a synchronous call to scavenge()
comes in, we could call VMHeap::deallocateLargeObject() for an adjacent object while the
lock in the other thread is dropped.  We fix this by checking for adjacent objects we
can merge with, and looping if we find one.

* bmalloc/FreeList.h:
(bmalloc::FreeList::push): Added BASSERTs to catch adding unmerged free objects.
* bmalloc/Heap.cpp:
(bmalloc::Heap::allocateLarge): Changed to use nextCanMerge().
* bmalloc/LargeObject.h:
(bmalloc::LargeObject::prevCanMerge): Repurposed prevIsAllocated.
(bmalloc::LargeObject::nextCanMerge): Repurposed nextIsAllocated.
(bmalloc::LargeObject::prevIsAllocated): Deleted.
(bmalloc::LargeObject::nextIsAllocated): Deleted.
* bmalloc/VMHeap.h:
(bmalloc::VMHeap::allocateLargeObject): Moved adding the extra object back to the free list
to after we set the object we'll return as being allocated.
(bmalloc::VMHeap::deallocateLargeObject):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@196536 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Source/bmalloc/ChangeLog
Source/bmalloc/bmalloc/FreeList.h
Source/bmalloc/bmalloc/Heap.cpp
Source/bmalloc/bmalloc/LargeObject.h
Source/bmalloc/bmalloc/VMHeap.h

index dcc9f8c..44419cc 100644 (file)
@@ -1,3 +1,30 @@
+2016-02-12  Michael Saboff  <msaboff@apple.com>
+
+        BASSERTs added in r196421 are causing debug test failures
+        https://bugs.webkit.org/show_bug.cgi?id=154113
+
+        Reviewed by Geoffrey Garen.
+
+        In VMHeap::deallocateLargeObject(), we drop the lock to deallocate the physical pages.
+        If the scavenger thread is running at the same time a synchronous call to scavenge()
+        comes in, we could call VMHeap::deallocateLargeObject() for an adjacent object while the
+        lock in the other thread is dropped.  We fix this by checking for adjacent objects we
+        can merge with and loop if we have one.
+
+        * bmalloc/FreeList.h:
+        (bmalloc::FreeList::push): Added BASSERT to catch adding unmerged free objects
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::allocateLarge): Changed to use nextCanMerge().
+        * bmalloc/LargeObject.h:
+        (bmalloc::LargeObject::prevCanMerge): Repurposed prevIsAllocated.
+        (bmalloc::LargeObject::nextCanMerge): Repurposed nextIsAllocated.
+        (bmalloc::LargeObject::prevIsAllocated): Deleted.
+        (bmalloc::LargeObject::nextIsAllocated): Deleted.
+        * bmalloc/VMHeap.h:
+        (bmalloc::VMHeap::allocateLargeObject): Moved adding the extra object back to the free list
+        to after we set the object we'll return as being allocated.
+        (bmalloc::VMHeap::deallocateLargeObject):
+
 2016-02-12  Mark Lam  <mark.lam@apple.com>
 
         Make BCRASH() use breakpoint traps too for non-debug OS(DARWIN).
index a409085..742d544 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -60,6 +60,8 @@ inline FreeList::FreeList()
 inline void FreeList::push(Owner owner, const LargeObject& largeObject)
 {
     BASSERT(largeObject.isFree());
+    BASSERT(!largeObject.prevCanMerge());
+    BASSERT(!largeObject.nextCanMerge());
     if (m_vector.size() == m_limit) {
         removeInvalidAndDuplicateEntries(owner);
         m_limit = std::max(m_vector.size() * freeListGrowFactor, freeListSearchDepth);
index d9e66f6..d4511f5 100644 (file)
@@ -371,7 +371,7 @@ void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t size)
     largeObject.setFree(false);
     
     if (nextLargeObject) {
-        BASSERT(nextLargeObject.nextIsAllocated());
+        BASSERT(!nextLargeObject.nextCanMerge());
         m_largeObjects.insert(nextLargeObject);
     }
     
index 1f0ad00..7cbfb0e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -54,9 +54,9 @@ public:
     void setFree(bool) const;
     bool isFree() const;
 
-    bool prevIsAllocated() const;
-    bool nextIsAllocated() const;
-    
+    bool prevCanMerge() const;
+    bool nextCanMerge() const;
+
     Owner owner() const;
     void setOwner(Owner) const;
     
@@ -121,21 +121,20 @@ inline bool LargeObject::isFree() const
     return m_beginTag->isFree();
 }
 
-inline bool LargeObject::prevIsAllocated() const
+inline bool LargeObject::prevCanMerge() const
 {
     EndTag* prev = m_beginTag->prev();
 
-    return !prev->isFree() || prev->owner() != this->owner();
+    return prev->isFree() && prev->owner() == this->owner();
 }
 
-inline bool LargeObject::nextIsAllocated() const
+inline bool LargeObject::nextCanMerge() const
 {
     BeginTag* next = m_endTag->next();
 
-    return !next->isFree() || next->owner() != this->owner();
+    return next->isFree() && next->owner() == this->owner();
 }
 
-
 inline Owner LargeObject::owner() const
 {
     validate();
index eb81c2c..17c956d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -57,7 +57,7 @@ public:
 
     void deallocateSmallPage(std::unique_lock<StaticMutex>&, SmallPage*);
     void deallocateMediumPage(std::unique_lock<StaticMutex>&, MediumPage*);
-    void deallocateLargeObject(std::unique_lock<StaticMutex>&, LargeObject&);
+    void deallocateLargeObject(std::unique_lock<StaticMutex>&, LargeObject);
 
 private:
     LargeObject allocateLargeObject(LargeObject&, size_t);
@@ -95,14 +95,23 @@ inline LargeObject VMHeap::allocateLargeObject(LargeObject& largeObject, size_t
 {
     BASSERT(largeObject.isFree());
 
+    LargeObject nextLargeObject;
+
     if (largeObject.size() - size > largeMin) {
         std::pair<LargeObject, LargeObject> split = largeObject.split(size);
         largeObject = split.first;
-        m_largeObjects.insert(split.second);
+        nextLargeObject = split.second;
     }
 
     vmAllocatePhysicalPagesSloppy(largeObject.begin(), largeObject.size());
     largeObject.setOwner(Owner::Heap);
+
+    // Be sure to set the owner for the object we return before inserting the leftover back
+    // into the free list. The free list asserts that we never insert an object that could
+    // have merged with its neighbor.
+    if (nextLargeObject)
+        m_largeObjects.insert(nextLargeObject);
+
     return largeObject.begin();
 }
 
@@ -151,25 +160,29 @@ inline void VMHeap::deallocateMediumPage(std::unique_lock<StaticMutex>& lock, Me
     m_mediumPages.push(page);
 }
 
-inline void VMHeap::deallocateLargeObject(std::unique_lock<StaticMutex>& lock, LargeObject& largeObject)
+inline void VMHeap::deallocateLargeObject(std::unique_lock<StaticMutex>& lock, LargeObject largeObject)
 {
     largeObject.setOwner(Owner::VMHeap);
-    
-    // If we couldn't merge with our neighbors before because they were in the
-    // VM heap, we can merge with them now.
-    LargeObject merged = largeObject.merge();
 
-    // Temporarily mark this object as allocated to prevent clients from merging
-    // with it or allocating it while we're messing with its physical pages.
-    merged.setFree(false);
+    // Multiple threads might scavenge concurrently, meaning that new merging opportunities
+    // become visible after we reacquire the lock. Therefore we loop.
+    do {
+        // If we couldn't merge with our neighbors before because they were in the
+        // VM heap, we can merge with them now.
+        largeObject = largeObject.merge();
 
-    lock.unlock();
-    vmDeallocatePhysicalPagesSloppy(merged.begin(), merged.size());
-    lock.lock();
+        // Temporarily mark this object as allocated to prevent clients from merging
+        // with it or allocating it while we're messing with its physical pages.
+        largeObject.setFree(false);
+
+        lock.unlock();
+        vmDeallocatePhysicalPagesSloppy(largeObject.begin(), largeObject.size());
+        lock.lock();
 
-    merged.setFree(true);
+        largeObject.setFree(true);
+    } while (largeObject.prevCanMerge() || largeObject.nextCanMerge());
 
-    m_largeObjects.insert(merged);
+    m_largeObjects.insert(largeObject);
 }
 
 } // namespace bmalloc