+2019-01-18 Matt Lewis <jlewis3@apple.com>
+
+ Unreviewed, rolling out r240160.
+
+ This broke multiple internal builds.
+
+ Reverted changeset:
+
+ "Gigacages should start allocations from a slide"
+ https://bugs.webkit.org/show_bug.cgi?id=193523
+ https://trac.webkit.org/changeset/240160
+
2019-01-18 Keith Miller <keith_miller@apple.com>
Gigacages should start allocations from a slide
arrayProfile(OpGetById::Metadata::m_modeMetadata.arrayLengthMode.arrayProfile, t0, t2, t5)
btiz t0, IsArray, .opGetByIdSlow
btiz t0, IndexingShapeMask, .opGetByIdSlow
- loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr Gigacage::jsValueGigacageMask, JSObject::m_butterfly[t3], t0, t1)
+ loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t3], t0, t1)
loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
bilt t0, 0, .opGetByIdSlow
orq tagTypeNumber, t0
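
Every loadCaged site below follows the same pattern: load the raw pointer field, mask it down to an offset within the cage, then rebase it on the cage's base pointer, so even a corrupted pointer cannot escape the cage. A rough C++ rendering of what the offlineasm macro computes (hypothetical helper name; a sketch, not the macro itself):

    #include <cstdint>

    // Sketch of the loadCaged pattern: mask, then rebase onto the cage base.
    template<typename T>
    T* cagedPointer(T* loaded, uintptr_t cageMask, void* cageBase)
    {
        uintptr_t offset = reinterpret_cast<uintptr_t>(loaded) & cageMask;
        return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(cageBase) + offset);
    }
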
loadConstantOrVariableInt32(size, t3, t1, .opGetByValSlow)
sxi2q t1, t1
- loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr Gigacage::jsValueGigacageMask, JSObject::m_butterfly[t0], t3, tagTypeNumber)
+ loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t0], t3, tagTypeNumber)
move TagTypeNumber, tagTypeNumber
andi IndexingShapeMask, t2
bia t2, Int8ArrayType - FirstTypedArrayType, .opGetByValUint8ArrayOrUint8ClampedArray
# We have Int8ArrayType.
- loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+ loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
loadbs [t3, t1], t0
finishIntGetByVal(t0, t1)
bia t2, Uint8ArrayType - FirstTypedArrayType, .opGetByValUint8ClampedArray
# We have Uint8ArrayType.
- loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+ loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
loadb [t3, t1], t0
finishIntGetByVal(t0, t1)
.opGetByValUint8ClampedArray:
# We have Uint8ClampedArrayType.
- loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+ loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
loadb [t3, t1], t0
finishIntGetByVal(t0, t1)
bia t2, Int16ArrayType - FirstTypedArrayType, .opGetByValUint16Array
# We have Int16ArrayType.
- loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+ loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
loadhs [t3, t1, 2], t0
finishIntGetByVal(t0, t1)
.opGetByValUint16Array:
# We have Uint16ArrayType.
- loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+ loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
loadh [t3, t1, 2], t0
finishIntGetByVal(t0, t1)
bia t2, Int32ArrayType - FirstTypedArrayType, .opGetByValUint32Array
# We have Int32ArrayType.
- loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+ loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
loadi [t3, t1, 4], t0
finishIntGetByVal(t0, t1)
.opGetByValUint32Array:
# We have Uint32ArrayType.
- loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+ loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
# This is the hardest part because of large unsigned values.
loadi [t3, t1, 4], t0
bilt t0, 0, .opGetByValSlow # This case is still awkward to implement in LLInt.
bieq t2, Float32ArrayType - FirstTypedArrayType, .opGetByValSlow
# We have Float64ArrayType.
- loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2)
+ loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)
loadd [t3, t1, 8], ft0
bdnequn ft0, ft0, .opGetByValSlow
finishDoubleGetByVal(ft0, t0, t1)
get(m_property, t0)
loadConstantOrVariableInt32(size, t0, t3, .opPutByValSlow)
sxi2q t3, t3
- loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr Gigacage::jsValueGigacageMask, JSObject::m_butterfly[t1], t0, tagTypeNumber)
+ loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t1], t0, tagTypeNumber)
move TagTypeNumber, tagTypeNumber
btinz t2, CopyOnWrite, .opPutByValSlow
andi IndexingShapeMask, t2
#if defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC
-namespace Gigacage {
-alignas(void*) char g_gigacageBasePtrs[gigacageBasePtrsSize];
+alignas(void*) char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];
+namespace Gigacage {
void* tryMalloc(Kind, size_t size)
{
}
} // namespace Gigacage
-#else // defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC
+#else
#include <bmalloc/bmalloc.h>
namespace Gigacage {
#pragma once
#include <wtf/FastMalloc.h>
-#include <wtf/StdLibExtras.h>
#if defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC
#define GIGACAGE_ENABLED 0
-namespace Gigacage {
-
-const size_t primitiveGigacageMask = 0;
-const size_t jsValueGigacageMask = 0;
-const size_t gigacageBasePtrsSize = 8 * KB;
-extern "C" alignas(void*) WTF_EXPORT_PRIVATE char g_gigacageBasePtrs[gigacageBasePtrsSize];
+#define PRIMITIVE_GIGACAGE_MASK 0
+#define JSVALUE_GIGACAGE_MASK 0
+#define GIGACAGE_BASE_PTRS_SIZE 8192
+extern "C" {
+alignas(void*) extern WTF_EXPORT_PRIVATE char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];
+}
+namespace Gigacage {
struct BasePtrs {
uintptr_t reservedForFlags;
#include <cstdio>
#include <mutex>
-#if GIGACAGE_ENABLED
-
-namespace Gigacage {
-
// This is exactly 32GB because inside JSC, indexed accesses for arrays, typed arrays, etc,
// use unsigned 32-bit ints as indices. The items those indices access are 8 bytes or less
// in size. 2^32 * 8 = 32GB. This means if an access on a caged type happens to go out of
// bounds, the access is guaranteed to land somewhere else in the cage or inside the runway.
// If this were less than 32GB, those OOB accesses could reach outside of the cage.
-constexpr size_t gigacageRunway = 32llu * 1024 * 1024 * 1024;
+#define GIGACAGE_RUNWAY (32llu * 1024 * 1024 * 1024)
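
The comment's arithmetic can be checked directly; a self-contained sketch (assumed local names, 64-bit size_t):

    #include <cstddef>

    constexpr size_t runway = 32llu * 1024 * 1024 * 1024; // GIGACAGE_RUNWAY
    constexpr size_t indexCount = 1llu << 32;             // unsigned 32-bit indices
    constexpr size_t maxElementSize = 8;                  // caged items are <= 8 bytes
    static_assert(indexCount * maxElementSize == runway, "2^32 * 8 == 32GB");
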
// Note: g_gigacageBasePtrs[0] is reserved for storing the wasEnabled flag.
// The first gigacageBasePtr will start at g_gigacageBasePtrs[sizeof(void*)].
// This is done so that the wasEnabled flag will also be protected along with the
// gigacageBasePtrs.
-alignas(gigacageBasePtrsSize) char g_gigacageBasePtrs[gigacageBasePtrsSize];
+alignas(GIGACAGE_BASE_PTRS_SIZE) char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];
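
Concretely, the layout the comment describes (a sketch with assumed casts; the slot indices follow the Kind enum and the BasePtrs static_asserts later in this patch):

    extern "C" char g_gigacageBasePtrs[]; // declared in Gigacage.h

    // Slot 0 doubles as the wasEnabled flag; each cage's base pointer lives
    // at kind * sizeof(void*), so one mprotect covers the flag and the bases.
    inline void* cageBase(unsigned kind) // 1 = Primitive, 2 = JSValue
    {
        return reinterpret_cast<void**>(g_gigacageBasePtrs)[kind];
    }
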
using namespace bmalloc;
+namespace Gigacage {
+
namespace {
bool s_isDisablingPrimitiveGigacageDisabled;
uintptr_t basePtrs = reinterpret_cast<uintptr_t>(g_gigacageBasePtrs);
// We might only get page size alignment, but that's also the minimum we need.
RELEASE_BASSERT(!(basePtrs & (vmPageSize() - 1)));
- mprotect(g_gigacageBasePtrs, gigacageBasePtrsSize, PROT_READ);
+ mprotect(g_gigacageBasePtrs, GIGACAGE_BASE_PTRS_SIZE, PROT_READ);
}
void unprotectGigacageBasePtrs()
{
- mprotect(g_gigacageBasePtrs, gigacageBasePtrsSize, PROT_READ | PROT_WRITE);
+ mprotect(g_gigacageBasePtrs, GIGACAGE_BASE_PTRS_SIZE, PROT_READ | PROT_WRITE);
}
class UnprotectGigacageBasePtrsScope {
Vector<Callback> callbacks;
};
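
Only the declaration of UnprotectGigacageBasePtrsScope survives in the context lines above; its body is presumably the obvious RAII pairing of the two helpers (assumed body, a sketch):

    void protectGigacageBasePtrs();
    void unprotectGigacageBasePtrs();

    // Make the base pointers writable on entry; re-protect them on exit.
    class UnprotectGigacageBasePtrsScope {
    public:
        UnprotectGigacageBasePtrsScope() { unprotectGigacageBasePtrs(); }
        ~UnprotectGigacageBasePtrsScope() { protectGigacageBasePtrs(); }
    };
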
+#if GIGACAGE_ENABLED
size_t runwaySize(Kind kind)
{
switch (kind) {
case Kind::ReservedForFlagsAndNotABasePtr:
RELEASE_BASSERT_NOT_REACHED();
case Kind::Primitive:
- return gigacageRunway;
+ return static_cast<size_t>(GIGACAGE_RUNWAY);
case Kind::JSValue:
- return 0;
+ return static_cast<size_t>(0);
}
- return 0;
+ return static_cast<size_t>(0);
}
+#endif
} // anonymous namespace
void ensureGigacage()
{
+#if GIGACAGE_ENABLED
static std::once_flag onceFlag;
std::call_once(
onceFlag,
setWasEnabled();
protectGigacageBasePtrs();
});
+#endif // GIGACAGE_ENABLED
}
void disablePrimitiveGigacage()
bool shouldBeEnabled()
{
static bool cached = false;
+
+#if GIGACAGE_ENABLED
static std::once_flag onceFlag;
std::call_once(
onceFlag,
cached = true;
});
+#endif // GIGACAGE_ENABLED
+
return cached;
}
} // namespace Gigacage
-#endif // GIGACAGE_ENABLED
#include "BExport.h"
#include "BInline.h"
#include "BPlatform.h"
-#include "Sizes.h"
#include <cstddef>
#include <inttypes.h>
-#if ((BOS(DARWIN) || BOS(LINUX)) && \
-(BCPU(X86_64) || (BCPU(ARM64) && !defined(__ILP32__) && (!BPLATFORM(IOS_FAMILY) || BPLATFORM(IOS)))))
-#define GIGACAGE_ENABLED 1
-#else
-#define GIGACAGE_ENABLED 0
-#endif
-
-
-namespace Gigacage {
-
-enum Kind {
- ReservedForFlagsAndNotABasePtr = 0,
- Primitive,
- JSValue,
-};
-
-BINLINE const char* name(Kind kind)
-{
- switch (kind) {
- case ReservedForFlagsAndNotABasePtr:
- RELEASE_BASSERT_NOT_REACHED();
- case Primitive:
- return "Primitive";
- case JSValue:
- return "JSValue";
- }
- BCRASH();
- return nullptr;
-}
-
-#if GIGACAGE_ENABLED
-
#if BCPU(ARM64)
-constexpr size_t primitiveGigacageSize = 2 * bmalloc::Sizes::GB;
-constexpr size_t jsValueGigacageSize = 1 * bmalloc::Sizes::GB;
-constexpr size_t gigacageBasePtrsSize = 16 * bmalloc::Sizes::kB;
-constexpr size_t minimumCageSizeAfterSlide = bmalloc::Sizes::GB / 2;
+#define PRIMITIVE_GIGACAGE_SIZE 0x80000000llu
+#define JSVALUE_GIGACAGE_SIZE 0x40000000llu
#define GIGACAGE_ALLOCATION_CAN_FAIL 1
#else
-constexpr size_t primitiveGigacageSize = 32 * bmalloc::Sizes::GB;
-constexpr size_t jsValueGigacageSize = 16 * bmalloc::Sizes::GB;
-constexpr size_t gigacageBasePtrsSize = 4 * bmalloc::Sizes::kB;
-constexpr size_t minimumCageSizeAfterSlide = 4 * bmalloc::Sizes::GB;
+#define PRIMITIVE_GIGACAGE_SIZE 0x800000000llu
+#define JSVALUE_GIGACAGE_SIZE 0x400000000llu
#define GIGACAGE_ALLOCATION_CAN_FAIL 0
#endif
#define GIGACAGE_ALLOCATION_CAN_FAIL 1
#endif
-static_assert(bmalloc::isPowerOfTwo(primitiveGigacageSize), "");
-static_assert(bmalloc::isPowerOfTwo(jsValueGigacageSize), "");
-static_assert(primitiveGigacageSize > minimumCageSizeAfterSlide, "");
-static_assert(jsValueGigacageSize > minimumCageSizeAfterSlide, "");
+static_assert(bmalloc::isPowerOfTwo(PRIMITIVE_GIGACAGE_SIZE), "");
+static_assert(bmalloc::isPowerOfTwo(JSVALUE_GIGACAGE_SIZE), "");
-constexpr size_t gigacageSizeToMask(size_t size) { return size - 1; }
-constexpr size_t primitiveGigacageMask = gigacageSizeToMask(primitiveGigacageSize);
-constexpr size_t jsValueGigacageMask = gigacageSizeToMask(jsValueGigacageSize);
+#define GIGACAGE_SIZE_TO_MASK(size) ((size) - 1)
+#define PRIMITIVE_GIGACAGE_MASK GIGACAGE_SIZE_TO_MASK(PRIMITIVE_GIGACAGE_SIZE)
+#define JSVALUE_GIGACAGE_MASK GIGACAGE_SIZE_TO_MASK(JSVALUE_GIGACAGE_SIZE)
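
Subtracting one from a power-of-two size yields an all-ones low-bit mask, which is what makes the mask-then-rebase caging sound; a quick self-contained check (values from the ARM64 branch above):

    #include <cstdint>

    constexpr uint64_t size = 0x80000000llu; // PRIMITIVE_GIGACAGE_SIZE on ARM64 (2GB)
    constexpr uint64_t mask = size - 1;      // GIGACAGE_SIZE_TO_MASK(size)
    static_assert((size & (size - 1)) == 0, "size is a power of two");
    static_assert((0xdeadbeefcafellu & mask) < size, "masked offsets stay inside the cage");
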
+#if ((BOS(DARWIN) || BOS(LINUX)) && \
+ (BCPU(X86_64) || (BCPU(ARM64) && !defined(__ILP32__) && (!BPLATFORM(IOS_FAMILY) || BPLATFORM(IOS)))))
+#define GIGACAGE_ENABLED 1
+#else
+#define GIGACAGE_ENABLED 0
+#endif
+
+#if BCPU(ARM64)
+#define GIGACAGE_BASE_PTRS_SIZE 16384
+#else
+#define GIGACAGE_BASE_PTRS_SIZE 4096
+#endif
-extern "C" alignas(gigacageBasePtrsSize) BEXPORT char g_gigacageBasePtrs[gigacageBasePtrsSize];
+extern "C" alignas(GIGACAGE_BASE_PTRS_SIZE) BEXPORT char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];
+
+namespace Gigacage {
BINLINE bool wasEnabled() { return g_gigacageBasePtrs[0]; }
BINLINE void setWasEnabled() { g_gigacageBasePtrs[0] = true; }
void* jsValue;
};
+enum Kind {
+ ReservedForFlagsAndNotABasePtr = 0,
+ Primitive,
+ JSValue,
+};
+
static_assert(offsetof(BasePtrs, primitive) == Kind::Primitive * sizeof(void*), "");
static_assert(offsetof(BasePtrs, jsValue) == Kind::JSValue * sizeof(void*), "");
-constexpr unsigned numKinds = 2;
+static constexpr unsigned numKinds = 2;
BEXPORT void ensureGigacage();
inline bool isPrimitiveGigacagePermanentlyEnabled() { return isDisablingPrimitiveGigacageDisabled(); }
inline bool canPrimitiveGigacageBeDisabled() { return !isDisablingPrimitiveGigacageDisabled(); }
+BINLINE const char* name(Kind kind)
+{
+ switch (kind) {
+ case ReservedForFlagsAndNotABasePtr:
+ RELEASE_BASSERT_NOT_REACHED();
+ case Primitive:
+ return "Primitive";
+ case JSValue:
+ return "JSValue";
+ }
+ BCRASH();
+ return nullptr;
+}
+
BINLINE void*& basePtr(BasePtrs& basePtrs, Kind kind)
{
switch (kind) {
case ReservedForFlagsAndNotABasePtr:
RELEASE_BASSERT_NOT_REACHED();
case Primitive:
- return static_cast<size_t>(primitiveGigacageSize);
+ return static_cast<size_t>(PRIMITIVE_GIGACAGE_SIZE);
case JSValue:
- return static_cast<size_t>(jsValueGigacageSize);
+ return static_cast<size_t>(JSVALUE_GIGACAGE_SIZE);
}
BCRASH();
return 0;
BINLINE size_t mask(Kind kind)
{
- return gigacageSizeToMask(size(kind));
+ return GIGACAGE_SIZE_TO_MASK(size(kind));
}
template<typename Func>
BEXPORT bool shouldBeEnabled();
-#else // GIGACAGE_ENABLED
-
-BINLINE void*& basePtr(Kind)
-{
- BCRASH();
- static void* unreachable;
- return unreachable;
-}
-BINLINE size_t size(Kind) { BCRASH(); return 0; }
-BINLINE void ensureGigacage() { }
-BINLINE bool wasEnabled() { return false; }
-BINLINE bool isCaged(Kind, const void*) { return true; }
-template<typename T> BINLINE T* caged(Kind, T* ptr) { return ptr; }
-BINLINE void disableDisablingPrimitiveGigacageIfShouldBeEnabled() { }
-BINLINE void disablePrimitiveGigacage() { }
-BINLINE void addPrimitiveDisableCallback(void (*)(void*), void*) { }
-BINLINE void removePrimitiveDisableCallback(void (*)(void*), void*) { }
-
-#endif // GIGACAGE_ENABLED
-
} // namespace Gigacage
-
#include "BulkDecommit.h"
#include "BumpAllocator.h"
#include "Chunk.h"
-#include "CryptoRandom.h"
#include "Environment.h"
#include "Gigacage.h"
#include "DebugHeap.h"
#if GIGACAGE_ENABLED
if (usingGigacage()) {
RELEASE_BASSERT(gigacageBasePtr());
- uint64_t random;
- cryptoRandom(reinterpret_cast<unsigned char*>(&random), sizeof(random));
- ptrdiff_t offset = random % (gigacageSize() - Gigacage::minimumCageSizeAfterSlide);
- offset = reinterpret_cast<ptrdiff_t>(roundDownToMultipleOf(vmPageSize(), reinterpret_cast<void*>(offset)));
- void* base = reinterpret_cast<unsigned char*>(gigacageBasePtr()) + offset;
- m_largeFree.add(LargeRange(base, gigacageSize() - offset, 0, 0));
+ m_largeFree.add(LargeRange(gigacageBasePtr(), gigacageSize(), 0, 0));
}
#endif
}
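
The lines deleted above are the slide itself: a page-aligned random offset into the cage, capped so at least minimumCageSizeAfterSlide of the cage remains usable. Restated as a self-contained sketch (assumed values: 4KB pages, the 32GB primitive cage and 4GB minimum from Gigacage.h):

    #include <cstddef>
    #include <cstdint>

    size_t slideOffset(uint64_t random)
    {
        constexpr size_t cageSize = 32llu * 1024 * 1024 * 1024;
        constexpr size_t minimumCageSizeAfterSlide = 4llu * 1024 * 1024 * 1024;
        constexpr size_t pageSize = 4096;
        size_t offset = random % (cageSize - minimumCageSizeAfterSlide);
        return offset & ~(pageSize - 1); // round down to a page boundary
    }

With the rollout, the free range once again starts at the cage base with the full cage size, as the added m_largeFree line shows.
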
// Repository for malloc sizing constants and calculations.
namespace Sizes {
-static constexpr size_t kB = 1024;
-static constexpr size_t MB = kB * kB;
-static constexpr size_t GB = kB * kB * kB;
-
-static constexpr size_t alignment = 8;
-static constexpr size_t alignmentMask = alignment - 1ul;
-
-static constexpr size_t chunkSize = 1 * MB;
-static constexpr size_t chunkMask = ~(chunkSize - 1ul);
-
-static constexpr size_t smallLineSize = 256;
-static constexpr size_t smallPageSize = 4 * kB;
-static constexpr size_t smallPageLineCount = smallPageSize / smallLineSize;
-
-static constexpr size_t maskSizeClassMax = 512;
-static constexpr size_t smallMax = 32 * kB;
-
-static constexpr size_t pageSizeMax = smallMax * 2;
-static constexpr size_t pageClassCount = pageSizeMax / smallPageSize;
-
-static constexpr size_t pageSizeWasteFactor = 8;
-static constexpr size_t logWasteFactor = 8;
-
-static constexpr size_t largeAlignment = smallMax / pageSizeWasteFactor;
-static constexpr size_t largeAlignmentMask = largeAlignment - 1;
-
-static constexpr size_t deallocatorLogCapacity = 512;
-static constexpr size_t bumpRangeCacheCapacity = 3;
-
-static constexpr size_t scavengerBytesPerMemoryPressureCheck = 16 * MB;
-static constexpr double memoryPressureThreshold = 0.75;
-
-static constexpr size_t maskSizeClassCount = maskSizeClassMax / alignment;
-
-constexpr size_t maskSizeClass(size_t size)
-{
- // We mask to accommodate zero.
- return mask((size - 1) / alignment, maskSizeClassCount - 1);
-}
-
-inline size_t maskObjectSize(size_t maskSizeClass)
-{
- return (maskSizeClass + 1) * alignment;
-}
-
-static constexpr size_t logAlignmentMin = maskSizeClassMax / logWasteFactor;
-
-static constexpr size_t logSizeClassCount = (log2(smallMax) - log2(maskSizeClassMax)) * logWasteFactor;
-
-inline size_t logSizeClass(size_t size)
-{
- size_t base = log2(size - 1) - log2(maskSizeClassMax);
- size_t offset = (size - 1 - (maskSizeClassMax << base));
- return base * logWasteFactor + offset / (logAlignmentMin << base);
-}
-
-inline size_t logObjectSize(size_t logSizeClass)
-{
- size_t base = logSizeClass / logWasteFactor;
- size_t offset = logSizeClass % logWasteFactor;
- return (maskSizeClassMax << base) + (offset + 1) * (logAlignmentMin << base);
-}
-
-static constexpr size_t sizeClassCount = maskSizeClassCount + logSizeClassCount;
-
-inline size_t sizeClass(size_t size)
-{
- if (size <= maskSizeClassMax)
- return maskSizeClass(size);
- return maskSizeClassCount + logSizeClass(size);
-}
-
-inline size_t objectSize(size_t sizeClass)
-{
- if (sizeClass < maskSizeClassCount)
- return maskObjectSize(sizeClass);
- return logObjectSize(sizeClass - maskSizeClassCount);
-}
-
-inline size_t pageSize(size_t pageClass)
-{
- return (pageClass + 1) * smallPageSize;
+ static const size_t kB = 1024;
+ static const size_t MB = kB * kB;
+
+ static const size_t alignment = 8;
+ static const size_t alignmentMask = alignment - 1ul;
+
+ static const size_t chunkSize = 1 * MB;
+ static const size_t chunkMask = ~(chunkSize - 1ul);
+
+ static const size_t smallLineSize = 256;
+ static const size_t smallPageSize = 4 * kB;
+ static const size_t smallPageLineCount = smallPageSize / smallLineSize;
+
+ static const size_t maskSizeClassMax = 512;
+ static const size_t smallMax = 32 * kB;
+
+ static const size_t pageSizeMax = smallMax * 2;
+ static const size_t pageClassCount = pageSizeMax / smallPageSize;
+
+ static const size_t pageSizeWasteFactor = 8;
+ static const size_t logWasteFactor = 8;
+
+ static const size_t largeAlignment = smallMax / pageSizeWasteFactor;
+ static const size_t largeAlignmentMask = largeAlignment - 1;
+
+ static const size_t deallocatorLogCapacity = 512;
+ static const size_t bumpRangeCacheCapacity = 3;
+
+ static const size_t scavengerBytesPerMemoryPressureCheck = 16 * MB;
+ static const double memoryPressureThreshold = 0.75;
+
+ static const size_t maskSizeClassCount = maskSizeClassMax / alignment;
+
+ constexpr size_t maskSizeClass(size_t size)
+ {
+ // We mask to accommodate zero.
+ return mask((size - 1) / alignment, maskSizeClassCount - 1);
+ }
+
+ inline size_t maskObjectSize(size_t maskSizeClass)
+ {
+ return (maskSizeClass + 1) * alignment;
+ }
+
+ static const size_t logAlignmentMin = maskSizeClassMax / logWasteFactor;
+
+ static const size_t logSizeClassCount = (log2(smallMax) - log2(maskSizeClassMax)) * logWasteFactor;
+
+ inline size_t logSizeClass(size_t size)
+ {
+ size_t base = log2(size - 1) - log2(maskSizeClassMax);
+ size_t offset = (size - 1 - (maskSizeClassMax << base));
+ return base * logWasteFactor + offset / (logAlignmentMin << base);
+ }
+
+ inline size_t logObjectSize(size_t logSizeClass)
+ {
+ size_t base = logSizeClass / logWasteFactor;
+ size_t offset = logSizeClass % logWasteFactor;
+ return (maskSizeClassMax << base) + (offset + 1) * (logAlignmentMin << base);
+ }
+
+ static const size_t sizeClassCount = maskSizeClassCount + logSizeClassCount;
+
+ inline size_t sizeClass(size_t size)
+ {
+ if (size <= maskSizeClassMax)
+ return maskSizeClass(size);
+ return maskSizeClassCount + logSizeClass(size);
+ }
+
+ inline size_t objectSize(size_t sizeClass)
+ {
+ if (sizeClass < maskSizeClassCount)
+ return maskObjectSize(sizeClass);
+ return logObjectSize(sizeClass - maskSizeClassCount);
+ }
+
+ inline size_t pageSize(size_t pageClass)
+ {
+ return (pageClass + 1) * smallPageSize;
+ }
}
-} // namespace Sizes
using namespace Sizes;
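
As a worked example of the restored two-regime mapping (constants recomputed locally so the check is self-contained):

    #include <cstddef>

    constexpr size_t alignment = 8;
    constexpr size_t maskSizeClassMax = 512;
    constexpr size_t maskSizeClassCount = maskSizeClassMax / alignment; // 64
    // Sizes up to 512 map linearly in 8-byte steps:
    static_assert((24 - 1) / alignment == 2, "sizeClass(24) is mask class 2");
    static_assert((512 - 1) / alignment == 63, "sizeClass(512) is the last mask class");
    // 513 falls through to the logarithmic regime: logSizeClass(513) == 0,
    // so sizeClass(513) == maskSizeClassCount + 0 == 64.
    static_assert(maskSizeClassCount == 64, "first log class follows the 64 mask classes");
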