Fix bmalloc::Allocator::tryAllocate() to return null on failure to allocate.
author    mark.lam@apple.com <mark.lam@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>  Fri, 6 Sep 2019 17:04:13 +0000 (17:04 +0000)
committer mark.lam@apple.com <mark.lam@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>  Fri, 6 Sep 2019 17:04:13 +0000 (17:04 +0000)
https://bugs.webkit.org/show_bug.cgi?id=201529
<rdar://problem/53935772>

Reviewed by Yusuke Suzuki.

JSTests:

* stress/test-out-of-memory.js: Added.

Source/bmalloc:

In this implementation, we pass FailureAction in as a runtime option.  If this
proves to be a perf issue, we can easily fix this by passing it as a template
argument.  That will also automatically elide unneeded code paths.  We'll defer
that exercise until we have evidence that it is warranted.
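
For illustration, a minimal sketch (not part of this patch; the names mirror
the runtime version in Allocator.h below, and the templated slow path is an
assumption) of the deferred template-argument variant, which would let the
compiler elide the branch for the untaken FailureAction in each instantiation:

    template<FailureAction action>
    void* Allocator::allocateImpl(size_t size)
    {
        void* object;
        if (!allocateFastCase(size, object))
            return allocateSlowCase<action>(size); // assumes allocateSlowCase is also templated
        return object;
    }

    void* Allocator::allocate(size_t size) { return allocateImpl<FailureAction::Crash>(size); }
    void* Allocator::tryAllocate(size_t size) { return allocateImpl<FailureAction::ReturnNull>(size); }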

* CMakeLists.txt:
* bmalloc.xcodeproj/project.pbxproj:
* bmalloc/Allocator.cpp:
(bmalloc::Allocator::allocateImpl):
(bmalloc::Allocator::reallocateImpl):
(bmalloc::Allocator::refillAllocatorSlowCase):
(bmalloc::Allocator::refillAllocator):
(bmalloc::Allocator::allocateLarge):
(bmalloc::Allocator::allocateLogSizeClass):
(bmalloc::Allocator::allocateSlowCase):
(bmalloc::Allocator::tryAllocate): Deleted.
(bmalloc::Allocator::allocate): Deleted.
(bmalloc::Allocator::reallocate): Deleted.
(bmalloc::Allocator::tryReallocate): Deleted.
* bmalloc/Allocator.h:
(bmalloc::Allocator::tryAllocate):
(bmalloc::Allocator::allocate):
(bmalloc::Allocator::tryReallocate):
(bmalloc::Allocator::reallocate):
(bmalloc::Allocator::allocateImpl):
* bmalloc/BumpAllocator.h:
* bmalloc/FailureAction.h: Added.
* bmalloc/Heap.cpp:
(bmalloc::Heap::allocateSmallChunk):
(bmalloc::Heap::allocateSmallPage):
(bmalloc::Heap::allocateSmallBumpRangesByMetadata):
(bmalloc::Heap::allocateSmallBumpRangesByObject):
(bmalloc::Heap::allocateLarge):
(bmalloc::Heap::tryAllocateLarge): Deleted.
* bmalloc/Heap.h:
(bmalloc::Heap::allocateSmallBumpRanges):
* bmalloc/bmalloc.cpp:
(bmalloc::api::tryLargeZeroedMemalignVirtual):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@249578 268f45cc-cd09-0410-ab3c-d52691b4dbfc

12 files changed:
JSTests/ChangeLog
JSTests/stress/test-out-of-memory.js [new file with mode: 0644]
Source/bmalloc/CMakeLists.txt
Source/bmalloc/ChangeLog
Source/bmalloc/bmalloc.xcodeproj/project.pbxproj
Source/bmalloc/bmalloc/Allocator.cpp
Source/bmalloc/bmalloc/Allocator.h
Source/bmalloc/bmalloc/BumpAllocator.h
Source/bmalloc/bmalloc/FailureAction.h [new file with mode: 0644]
Source/bmalloc/bmalloc/Heap.cpp
Source/bmalloc/bmalloc/Heap.h
Source/bmalloc/bmalloc/bmalloc.cpp
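
Before the file-by-file diffs, a caller-side sketch of the contract this patch
establishes. It is illustrative only: the allocator instance and the recovery
path are hypothetical, while the tryAllocate()/allocate() signatures are the
ones declared in Allocator.h below.

    // After this fix, the try* entry points report failure by returning
    // null (FailureAction::ReturnNull); the non-try variants still crash
    // on failure (FailureAction::Crash).
    void* p = allocator.tryAllocate(alignment, size);
    if (!p) {
        // Hypothetical recovery path, e.g. raising an out-of-memory error
        // instead of terminating the process.
        handleOutOfMemory();
    }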

index ed347d8..5da6088 100644 (file)
@@ -1,3 +1,13 @@
+2019-09-06  Mark Lam  <mark.lam@apple.com>
+
+        Fix bmalloc::Allocator::tryAllocate() to return null on failure to allocate.
+        https://bugs.webkit.org/show_bug.cgi?id=201529
+        <rdar://problem/53935772>
+
+        Reviewed by Yusuke Suzuki.
+
+        * stress/test-out-of-memory.js: Added.
+
 2019-09-05  Tadeu Zagallo  <tzagallo@apple.com>
 
         LazyClassStructure::setConstructor should not store the constructor to the global object
diff --git a/JSTests/stress/test-out-of-memory.js b/JSTests/stress/test-out-of-memory.js
new file mode 100644 (file)
index 0000000..6ec6089
--- /dev/null
@@ -0,0 +1,24 @@
+const a = [0];
+a.__proto__ = {};
+Object.defineProperty(a, 0, {
+    get: foo
+});
+Object.defineProperty(a, 80000000, {});
+
+function foo() {
+    new Uint8Array(a);
+}
+
+new Promise(foo);
+
+var exception;
+try {
+    for (let i = 0; i < 10000000; i++)
+        new ArrayBuffer(1000);
+
+} catch (e) {
+    exception = e;
+}
+
+if (exception != "Error: Out of memory")
+    throw "FAILED";
index b594e27..f3bea19 100644 (file)
@@ -64,6 +64,7 @@ set(bmalloc_PUBLIC_HEADERS
     bmalloc/EligibilityResult.h
     bmalloc/EligibilityResultInlines.h
     bmalloc/Environment.h
+    bmalloc/FailureAction.h
     bmalloc/FixedVector.h
     bmalloc/FreeList.h
     bmalloc/FreeListInlines.h
index 51d9a36..e79073c 100644 (file)
@@ -1,3 +1,50 @@
+2019-09-06  Mark Lam  <mark.lam@apple.com>
+
+        Fix bmalloc::Allocator::tryAllocate() to return null on failure to allocate.
+        https://bugs.webkit.org/show_bug.cgi?id=201529
+        <rdar://problem/53935772>
+
+        Reviewed by Yusuke Suzuki.
+
+        In this implementation, we pass FailureAction in as a runtime option.  If this
+        proves to be a perf issue, we can easily fix this by passing it as a template
+        argument.  That will also automatically elide unneeded code paths.  We'll defer
+        that exercise until we have evidence that it is warranted.
+
+        * CMakeLists.txt:
+        * bmalloc.xcodeproj/project.pbxproj:
+        * bmalloc/Allocator.cpp:
+        (bmalloc::Allocator::allocateImpl):
+        (bmalloc::Allocator::reallocateImpl):
+        (bmalloc::Allocator::refillAllocatorSlowCase):
+        (bmalloc::Allocator::refillAllocator):
+        (bmalloc::Allocator::allocateLarge):
+        (bmalloc::Allocator::allocateLogSizeClass):
+        (bmalloc::Allocator::allocateSlowCase):
+        (bmalloc::Allocator::tryAllocate): Deleted.
+        (bmalloc::Allocator::allocate): Deleted.
+        (bmalloc::Allocator::reallocate): Deleted.
+        (bmalloc::Allocator::tryReallocate): Deleted.
+        * bmalloc/Allocator.h:
+        (bmalloc::Allocator::tryAllocate):
+        (bmalloc::Allocator::allocate):
+        (bmalloc::Allocator::tryReallocate):
+        (bmalloc::Allocator::reallocate):
+        (bmalloc::Allocator::allocateImpl):
+        * bmalloc/BumpAllocator.h:
+        * bmalloc/FailureAction.h: Added.
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::allocateSmallChunk):
+        (bmalloc::Heap::allocateSmallPage):
+        (bmalloc::Heap::allocateSmallBumpRangesByMetadata):
+        (bmalloc::Heap::allocateSmallBumpRangesByObject):
+        (bmalloc::Heap::allocateLarge):
+        (bmalloc::Heap::tryAllocateLarge): Deleted.
+        * bmalloc/Heap.h:
+        (bmalloc::Heap::allocateSmallBumpRanges):
+        * bmalloc/bmalloc.cpp:
+        (bmalloc::api::tryLargeZeroedMemalignVirtual):
+
 2019-09-05  Mark Lam  <mark.lam@apple.com>
 
         Refactor the Gigacage code to require less pointer casting.
index d48b116..bc83572 100644 (file)
                E3FBB5A1225EADB000DB6FBD /* IsoSharedHeap.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E3FBB59E225EADB000DB6FBD /* IsoSharedHeap.cpp */; };
                E3FBB5A2225EADB000DB6FBD /* IsoSharedHeap.h in Headers */ = {isa = PBXBuildFile; fileRef = E3FBB59F225EADB000DB6FBD /* IsoSharedHeap.h */; settings = {ATTRIBUTES = (Private, ); }; };
                E3FBB5A4225ECAD200DB6FBD /* IsoSharedHeapInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = E3FBB5A3225ECAD200DB6FBD /* IsoSharedHeapInlines.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               FE48BD3B2321E8D700F136D0 /* FailureAction.h in Headers */ = {isa = PBXBuildFile; fileRef = FE48BD3A2321E8CC00F136D0 /* FailureAction.h */; settings = {ATTRIBUTES = (Private, ); }; };
 /* End PBXBuildFile section */
 
 /* Begin PBXContainerItemProxy section */
                E3FBB59E225EADB000DB6FBD /* IsoSharedHeap.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = IsoSharedHeap.cpp; path = bmalloc/IsoSharedHeap.cpp; sourceTree = "<group>"; };
                E3FBB59F225EADB000DB6FBD /* IsoSharedHeap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = IsoSharedHeap.h; path = bmalloc/IsoSharedHeap.h; sourceTree = "<group>"; };
                E3FBB5A3225ECAD200DB6FBD /* IsoSharedHeapInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = IsoSharedHeapInlines.h; path = bmalloc/IsoSharedHeapInlines.h; sourceTree = "<group>"; };
+               FE48BD3A2321E8CC00F136D0 /* FailureAction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = FailureAction.h; path = bmalloc/FailureAction.h; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
 /* Begin PBXFrameworksBuildPhase section */
                                142B44351E2839E7001DA6E9 /* DebugHeap.h */,
                                14895D8F1A3A319C0006235D /* Environment.cpp */,
                                14895D901A3A319C0006235D /* Environment.h */,
+                               FE48BD3A2321E8CC00F136D0 /* FailureAction.h */,
                                0F5BF14E1F22DEAF0029D91D /* Gigacage.cpp */,
                                0F5BF14C1F22B0C30029D91D /* Gigacage.h */,
                                14DA320E18875D9F007269E0 /* Heap.cpp */,
                                0F7EB82E1F9541B000F1ABCB /* IsoPage.h in Headers */,
                                0F7EB8311F9541B000F1ABCB /* IsoPageInlines.h in Headers */,
                                0F7EB82B1F9541B000F1ABCB /* IsoPageTrigger.h in Headers */,
+                               FE48BD3B2321E8D700F136D0 /* FailureAction.h in Headers */,
                                E3FBB5A0225EADB000DB6FBD /* IsoSharedConfig.h in Headers */,
                                E3FBB5A2225EADB000DB6FBD /* IsoSharedHeap.h in Headers */,
                                E3FBB5A4225ECAD200DB6FBD /* IsoSharedHeapInlines.h in Headers */,
index d086d22..f412083 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2019 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -50,28 +50,7 @@ Allocator::~Allocator()
     scavenge();
 }
 
-void* Allocator::tryAllocate(size_t size)
-{
-    if (size <= smallMax)
-        return allocate(size);
-
-    std::unique_lock<Mutex> lock(Heap::mutex());
-    return m_heap.tryAllocateLarge(lock, alignment, size);
-}
-
-void* Allocator::allocate(size_t alignment, size_t size)
-{
-    bool crashOnFailure = true;
-    return allocateImpl(alignment, size, crashOnFailure);
-}
-
-void* Allocator::tryAllocate(size_t alignment, size_t size)
-{
-    bool crashOnFailure = false;
-    return allocateImpl(alignment, size, crashOnFailure);
-}
-
-void* Allocator::allocateImpl(size_t alignment, size_t size, bool crashOnFailure)
+void* Allocator::allocateImpl(size_t alignment, size_t size, FailureAction action)
 {
     BASSERT(isPowerOfTwo(alignment));
 
@@ -79,27 +58,12 @@ void* Allocator::allocateImpl(size_t alignment, size_t size, bool crashOnFailure
         size = alignment;
 
     if (size <= smallMax && alignment <= smallMax)
-        return allocate(roundUpToMultipleOf(alignment, size));
-
-    std::unique_lock<Mutex> lock(Heap::mutex());
-    if (crashOnFailure)
-        return m_heap.allocateLarge(lock, alignment, size);
-    return m_heap.tryAllocateLarge(lock, alignment, size);
-}
-
-void* Allocator::reallocate(void* object, size_t newSize)
-{
-    bool crashOnFailure = true;
-    return reallocateImpl(object, newSize, crashOnFailure);
-}
+        return allocateImpl(roundUpToMultipleOf(alignment, size), action);
 
-void* Allocator::tryReallocate(void* object, size_t newSize)
-{
-    bool crashOnFailure = false;
-    return reallocateImpl(object, newSize, crashOnFailure);
+    return allocateLarge(size, action);
 }
 
-void* Allocator::reallocateImpl(void* object, size_t newSize, bool crashOnFailure)
+void* Allocator::reallocateImpl(void* object, size_t newSize, FailureAction action)
 {
     size_t oldSize = 0;
     switch (objectType(m_heap, object)) {
@@ -125,12 +89,10 @@ void* Allocator::reallocateImpl(void* object, size_t newSize, bool crashOnFailur
     }
 
     void* result = nullptr;
-    if (crashOnFailure)
-        result = allocate(newSize);
-    else {
-        result = tryAllocate(newSize);
-        if (!result)
-            return nullptr;
+    result = allocateImpl(newSize, action);
+    if (!result) {
+        BASSERT(action == FailureAction::ReturnNull);
+        return nullptr;
     }
     size_t copySize = std::min(oldSize, newSize);
     memcpy(result, object, copySize);
@@ -157,51 +119,55 @@ void Allocator::scavenge()
     }
 }
 
-BNO_INLINE void Allocator::refillAllocatorSlowCase(BumpAllocator& allocator, size_t sizeClass)
+BNO_INLINE void Allocator::refillAllocatorSlowCase(BumpAllocator& allocator, size_t sizeClass, FailureAction action)
 {
     BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass];
 
     std::unique_lock<Mutex> lock(Heap::mutex());
     m_deallocator.processObjectLog(lock);
-    m_heap.allocateSmallBumpRanges(lock, sizeClass, allocator, bumpRangeCache, m_deallocator.lineCache(lock));
+    m_heap.allocateSmallBumpRanges(lock, sizeClass, allocator, bumpRangeCache, m_deallocator.lineCache(lock), action);
 }
 
-BINLINE void Allocator::refillAllocator(BumpAllocator& allocator, size_t sizeClass)
+BINLINE void Allocator::refillAllocator(BumpAllocator& allocator, size_t sizeClass, FailureAction action)
 {
     BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass];
     if (!bumpRangeCache.size())
-        return refillAllocatorSlowCase(allocator, sizeClass);
+        return refillAllocatorSlowCase(allocator, sizeClass, action);
     return allocator.refill(bumpRangeCache.pop());
 }
 
-BNO_INLINE void* Allocator::allocateLarge(size_t size)
+BNO_INLINE void* Allocator::allocateLarge(size_t size, FailureAction action)
 {
     std::unique_lock<Mutex> lock(Heap::mutex());
-    return m_heap.allocateLarge(lock, alignment, size);
+    return m_heap.allocateLarge(lock, alignment, size, action);
 }
 
-BNO_INLINE void* Allocator::allocateLogSizeClass(size_t size)
+BNO_INLINE void* Allocator::allocateLogSizeClass(size_t size, FailureAction action)
 {
     size_t sizeClass = bmalloc::sizeClass(size);
     BumpAllocator& allocator = m_bumpAllocators[sizeClass];
     if (!allocator.canAllocate())
-        refillAllocator(allocator, sizeClass);
+        refillAllocator(allocator, sizeClass, action);
+    if (action == FailureAction::ReturnNull && !allocator.canAllocate())
+        return nullptr;
     return allocator.allocate();
 }
 
-void* Allocator::allocateSlowCase(size_t size)
+void* Allocator::allocateSlowCase(size_t size, FailureAction action)
 {
     if (size <= maskSizeClassMax) {
         size_t sizeClass = bmalloc::maskSizeClass(size);
         BumpAllocator& allocator = m_bumpAllocators[sizeClass];
-        refillAllocator(allocator, sizeClass);
+        refillAllocator(allocator, sizeClass, action);
+        if (action == FailureAction::ReturnNull && !allocator.canAllocate())
+            return nullptr;
         return allocator.allocate();
     }
 
     if (size <= smallMax)
-        return allocateLogSizeClass(size);
+        return allocateLogSizeClass(size, action);
 
-    return allocateLarge(size);
+    return allocateLarge(size, action);
 }
 
 } // namespace bmalloc
index 933aae6..a25dd09 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2019 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,6 +28,7 @@
 
 #include "BExport.h"
 #include "BumpAllocator.h"
+#include "FailureAction.h"
 #include <array>
 
 namespace bmalloc {
@@ -42,27 +43,28 @@ public:
     Allocator(Heap&, Deallocator&);
     ~Allocator();
 
-    BEXPORT void* tryAllocate(size_t);
-    void* allocate(size_t);
-    void* tryAllocate(size_t alignment, size_t);
-    void* allocate(size_t alignment, size_t);
-    void* tryReallocate(void*, size_t);
-    void* reallocate(void*, size_t);
+    void* tryAllocate(size_t size) { return allocateImpl(size, FailureAction::ReturnNull); }
+    void* allocate(size_t size) { return allocateImpl(size, FailureAction::Crash); }
+    void* tryAllocate(size_t alignment, size_t size) { return allocateImpl(alignment, size, FailureAction::ReturnNull); }
+    void* allocate(size_t alignment, size_t size) { return allocateImpl(alignment, size, FailureAction::Crash); }
+    void* tryReallocate(void* object, size_t newSize) { return reallocateImpl(object, newSize, FailureAction::ReturnNull); }
+    void* reallocate(void* object, size_t newSize) { return reallocateImpl(object, newSize, FailureAction::Crash); }
 
     void scavenge();
 
 private:
-    void* allocateImpl(size_t alignment, size_t, bool crashOnFailure);
-    void* reallocateImpl(void*, size_t, bool crashOnFailure);
+    void* allocateImpl(size_t, FailureAction);
+    void* allocateImpl(size_t alignment, size_t, FailureAction);
+    void* reallocateImpl(void*, size_t, FailureAction);
 
     bool allocateFastCase(size_t, void*&);
-    BEXPORT void* allocateSlowCase(size_t);
-    
-    void* allocateLogSizeClass(size_t);
-    void* allocateLarge(size_t);
+    BEXPORT void* allocateSlowCase(size_t, FailureAction);
+
+    void* allocateLogSizeClass(size_t, FailureAction);
+    void* allocateLarge(size_t, FailureAction);
     
-    void refillAllocator(BumpAllocator&, size_t sizeClass);
-    void refillAllocatorSlowCase(BumpAllocator&, size_t sizeClass);
+    inline void refillAllocator(BumpAllocator&, size_t sizeClass, FailureAction);
+    void refillAllocatorSlowCase(BumpAllocator&, size_t sizeClass, FailureAction);
     
     std::array<BumpAllocator, sizeClassCount> m_bumpAllocators;
     std::array<BumpRangeCache, sizeClassCount> m_bumpRangeCaches;
@@ -84,11 +86,11 @@ inline bool Allocator::allocateFastCase(size_t size, void*& object)
     return true;
 }
 
-inline void* Allocator::allocate(size_t size)
+inline void* Allocator::allocateImpl(size_t size, FailureAction action)
 {
     void* object;
     if (!allocateFastCase(size, object))
-        return allocateSlowCase(size);
+        return allocateSlowCase(size, action);
     return object;
 }
 
index 6f70917..fab3d99 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2019 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
diff --git a/Source/bmalloc/bmalloc/FailureAction.h b/Source/bmalloc/bmalloc/FailureAction.h
new file mode 100644 (file)
index 0000000..a0d1e0b
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2019 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace bmalloc {
+
+enum class FailureAction { Crash, ReturnNull };
+
+} // namespace bmalloc
index faee2d5..d54ceb1 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2019 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -226,17 +226,21 @@ void Heap::deallocateLineCache(std::unique_lock<Mutex>&, LineCache& lineCache)
     }
 }
 
-void Heap::allocateSmallChunk(std::unique_lock<Mutex>& lock, size_t pageClass)
+void Heap::allocateSmallChunk(std::unique_lock<Mutex>& lock, size_t pageClass, FailureAction action)
 {
     RELEASE_BASSERT(isActiveHeapKind(m_kind));
     
     size_t pageSize = bmalloc::pageSize(pageClass);
 
-    Chunk* chunk = [&]() {
+    Chunk* chunk = [&]() -> Chunk* {
         if (!m_chunkCache[pageClass].isEmpty())
             return m_chunkCache[pageClass].pop();
 
-        void* memory = allocateLarge(lock, chunkSize, chunkSize);
+        void* memory = allocateLarge(lock, chunkSize, chunkSize, action);
+        if (!memory) {
+            BASSERT(action == FailureAction::ReturnNull);
+            return nullptr;
+        }
 
         Chunk* chunk = new (memory) Chunk(pageSize);
 
@@ -256,7 +260,8 @@ void Heap::allocateSmallChunk(std::unique_lock<Mutex>& lock, size_t pageClass)
         return chunk;
     }();
     
-    m_freePages[pageClass].push(chunk);
+    if (chunk)
+        m_freePages[pageClass].push(chunk);
 }
 
 void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
@@ -285,7 +290,7 @@ void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
     m_largeFree.add(LargeRange(chunk, size, startPhysicalSize, totalPhysicalSize));
 }
 
-SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeClass, LineCache& lineCache)
+SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeClass, LineCache& lineCache, FailureAction action)
 {
     RELEASE_BASSERT(isActiveHeapKind(m_kind));
 
@@ -297,11 +302,13 @@ SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeCla
 
     m_scavenger->didStartGrowing();
     
-    SmallPage* page = [&]() {
+    SmallPage* page = [&]() -> SmallPage* {
         size_t pageClass = m_pageClasses[sizeClass];
         
         if (m_freePages[pageClass].isEmpty())
-            allocateSmallChunk(lock, pageClass);
+            allocateSmallChunk(lock, pageClass, action);
+        if (action == FailureAction::ReturnNull && m_freePages[pageClass].isEmpty())
+            return nullptr;
 
         Chunk* chunk = m_freePages[pageClass].tail();
 
@@ -328,6 +335,10 @@ SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeCla
 
         return page;
     }();
+    if (!page) {
+        BASSERT(action == FailureAction::ReturnNull);
+        return nullptr;
+    }
 
     page->setSizeClass(sizeClass);
     return page;
@@ -376,11 +387,16 @@ void Heap::deallocateSmallLine(std::unique_lock<Mutex>& lock, Object object, Lin
 void Heap::allocateSmallBumpRangesByMetadata(
     std::unique_lock<Mutex>& lock, size_t sizeClass,
     BumpAllocator& allocator, BumpRangeCache& rangeCache,
-    LineCache& lineCache)
+    LineCache& lineCache, FailureAction action)
 {
+    BUNUSED(action);
     RELEASE_BASSERT(isActiveHeapKind(m_kind));
 
-    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
+    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache, action);
+    if (!page) {
+        BASSERT(action == FailureAction::ReturnNull);
+        return;
+    }
     SmallLine* lines = page->begin();
     BASSERT(page->hasFreeLines(lock));
     size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
@@ -418,14 +434,14 @@ void Heap::allocateSmallBumpRangesByMetadata(
     for (;;) {
         if (!findSmallBumpRange(lineNumber)) {
             page->setHasFreeLines(lock, false);
-            BASSERT(allocator.canAllocate());
+            BASSERT(action == FailureAction::ReturnNull || allocator.canAllocate());
             return;
         }
 
         // In a fragmented page, some free ranges might not fit in the cache.
         if (rangeCache.size() == rangeCache.capacity()) {
             lineCache[sizeClass].push(page);
-            BASSERT(allocator.canAllocate());
+            BASSERT(action == FailureAction::ReturnNull || allocator.canAllocate());
             return;
         }
 
@@ -440,12 +456,17 @@ void Heap::allocateSmallBumpRangesByMetadata(
 void Heap::allocateSmallBumpRangesByObject(
     std::unique_lock<Mutex>& lock, size_t sizeClass,
     BumpAllocator& allocator, BumpRangeCache& rangeCache,
-    LineCache& lineCache)
+    LineCache& lineCache, FailureAction action)
 {
+    BUNUSED(action);
     RELEASE_BASSERT(isActiveHeapKind(m_kind));
 
     size_t size = allocator.size();
-    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
+    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache, action);
+    if (!page) {
+        BASSERT(action == FailureAction::ReturnNull);
+        return;
+    }
     BASSERT(page->hasFreeLines(lock));
 
     auto findSmallBumpRange = [&](Object& it, Object& end) {
@@ -475,14 +496,14 @@ void Heap::allocateSmallBumpRangesByObject(
     for (;;) {
         if (!findSmallBumpRange(it, end)) {
             page->setHasFreeLines(lock, false);
-            BASSERT(allocator.canAllocate());
+            BASSERT(action == FailureAction::ReturnNull || allocator.canAllocate());
             return;
         }
 
         // In a fragmented page, some free ranges might not fit in the cache.
         if (rangeCache.size() == rangeCache.capacity()) {
             lineCache[sizeClass].push(page);
-            BASSERT(allocator.canAllocate());
+            BASSERT(action == FailureAction::ReturnNull || allocator.canAllocate());
             return;
         }
 
@@ -542,8 +563,16 @@ LargeRange Heap::splitAndAllocate(std::unique_lock<Mutex>&, LargeRange& range, s
     return range;
 }
 
-void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
+void* Heap::allocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size, FailureAction action)
 {
+#define ASSERT_OR_RETURN_ON_FAILURE(cond) do { \
+        if (action == FailureAction::Crash) \
+            RELEASE_BASSERT(cond); \
+        else if (!(cond)) \
+            return nullptr; \
+    } while (false)
+
+
     RELEASE_BASSERT(isActiveHeapKind(m_kind));
 
     BASSERT(isPowerOfTwo(alignment));
@@ -551,13 +580,11 @@ void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, si
     m_scavenger->didStartGrowing();
     
     size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
-    if (roundedSize < size) // Check for overflow
-        return nullptr;
+    ASSERT_OR_RETURN_ON_FAILURE(roundedSize >= size); // Check for overflow
     size = roundedSize;
 
     size_t roundedAlignment = roundUpToMultipleOf<largeAlignment>(alignment);
-    if (roundedAlignment < alignment) // Check for overflow
-        return nullptr;
+    ASSERT_OR_RETURN_ON_FAILURE(roundedAlignment >= alignment); // Check for overflow
     alignment = roundedAlignment;
 
     LargeRange range = m_largeFree.remove(alignment, size);
@@ -565,15 +592,13 @@ void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, si
         if (m_hasPendingDecommits) {
             m_condition.wait(lock, [&]() { return !m_hasPendingDecommits; });
             // Now we're guaranteed we're looking at all available memory.
-            return tryAllocateLarge(lock, alignment, size);
+            return allocateLarge(lock, alignment, size, action);
         }
 
-        if (usingGigacage())
-            return nullptr;
+        ASSERT_OR_RETURN_ON_FAILURE(!usingGigacage());
 
         range = VMHeap::get()->tryAllocateLargeChunk(alignment, size);
-        if (!range)
-            return nullptr;
+        ASSERT_OR_RETURN_ON_FAILURE(range);
         
         m_largeFree.add(range);
         range = m_largeFree.remove(alignment, size);
@@ -582,14 +607,10 @@ void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, si
     m_freeableMemory -= range.totalPhysicalSize();
 
     void* result = splitAndAllocate(lock, range, alignment, size).begin();
+    ASSERT_OR_RETURN_ON_FAILURE(result);
     return result;
-}
 
-void* Heap::allocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
-{
-    void* result = tryAllocateLarge(lock, alignment, size);
-    RELEASE_BASSERT(result);
-    return result;
+#undef ASSERT_OR_RETURN_ON_FAILURE
 }
 
 bool Heap::isLarge(std::unique_lock<Mutex>&, void* object)
index 96735c5..b27192c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2019 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,6 +28,7 @@
 
 #include "BumpRange.h"
 #include "Chunk.h"
+#include "FailureAction.h"
 #include "HeapKind.h"
 #include "LargeMap.h"
 #include "LineMetadata.h"
@@ -64,12 +65,11 @@ public:
     HeapKind kind() const { return m_kind; }
     
     void allocateSmallBumpRanges(std::unique_lock<Mutex>&, size_t sizeClass,
-        BumpAllocator&, BumpRangeCache&, LineCache&);
+        BumpAllocator&, BumpRangeCache&, LineCache&, FailureAction);
     void derefSmallLine(std::unique_lock<Mutex>&, Object, LineCache&);
     void deallocateLineCache(std::unique_lock<Mutex>&, LineCache&);
 
-    void* allocateLarge(std::unique_lock<Mutex>&, size_t alignment, size_t);
-    void* tryAllocateLarge(std::unique_lock<Mutex>&, size_t alignment, size_t);
+    void* allocateLarge(std::unique_lock<Mutex>&, size_t alignment, size_t, FailureAction);
     void deallocateLarge(std::unique_lock<Mutex>&, void*);
 
     bool isLarge(std::unique_lock<Mutex>&, void*);
@@ -110,14 +110,14 @@ private:
     void initializePageMetadata();
 
     void allocateSmallBumpRangesByMetadata(std::unique_lock<Mutex>&,
-        size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&);
+        size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&, FailureAction);
     void allocateSmallBumpRangesByObject(std::unique_lock<Mutex>&,
-        size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&);
+        size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&, FailureAction);
 
-    SmallPage* allocateSmallPage(std::unique_lock<Mutex>&, size_t sizeClass, LineCache&);
+    SmallPage* allocateSmallPage(std::unique_lock<Mutex>&, size_t sizeClass, LineCache&, FailureAction);
     void deallocateSmallLine(std::unique_lock<Mutex>&, Object, LineCache&);
 
-    void allocateSmallChunk(std::unique_lock<Mutex>&, size_t pageClass);
+    void allocateSmallChunk(std::unique_lock<Mutex>&, size_t pageClass, FailureAction);
     void deallocateSmallChunk(Chunk*, size_t pageClass);
 
     void mergeLarge(BeginTag*&, EndTag*&, Range&);
@@ -157,11 +157,11 @@ private:
 inline void Heap::allocateSmallBumpRanges(
     std::unique_lock<Mutex>& lock, size_t sizeClass,
     BumpAllocator& allocator, BumpRangeCache& rangeCache,
-    LineCache& lineCache)
+    LineCache& lineCache, FailureAction action)
 {
     if (sizeClass < bmalloc::sizeClass(smallLineSize))
-        return allocateSmallBumpRangesByMetadata(lock, sizeClass, allocator, rangeCache, lineCache);
-    return allocateSmallBumpRangesByObject(lock, sizeClass, allocator, rangeCache, lineCache);
+        return allocateSmallBumpRangesByMetadata(lock, sizeClass, allocator, rangeCache, lineCache, action);
+    return allocateSmallBumpRangesByObject(lock, sizeClass, allocator, rangeCache, lineCache, action);
 }
 
 inline void Heap::derefSmallLine(std::unique_lock<Mutex>& lock, Object object, LineCache& lineCache)
index 59b6642..16be358 100644 (file)
@@ -59,7 +59,7 @@ void* tryLargeZeroedMemalignVirtual(size_t requiredAlignment, size_t requestedSi
         Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind);
 
         std::unique_lock<Mutex> lock(Heap::mutex());
-        result = heap.tryAllocateLarge(lock, alignment, size);
+        result = heap.allocateLarge(lock, alignment, size, FailureAction::ReturnNull);
         if (result) {
             // Don't track this as dirty memory that dictates how we drive the scavenger.
             // FIXME: We should make it so that users of this API inform bmalloc which