+2007-11-29 Oliver Hunt <oliver@apple.com>
+
+ Reviewed by Geoff.
+
+ Merging updated system alloc and spinlock code from r38 of TCMalloc.
+
+ This is needed as a precursor to the merge of TCMalloc proper.
+
+ * wtf/FastMalloc.cpp:
+ (WTF::TCMalloc_PageHeap::GrowHeap):
+ * wtf/TCSpinLock.h:
+ (TCMalloc_SpinLock::TCMalloc_SpinLock):
+ (TCMalloc_SpinLock::):
+ (TCMalloc_SpinLock::Lock):
+ (TCMalloc_SpinLock::Unlock):
+ (TCMalloc_SpinLock::IsHeld):
+ * wtf/TCSystemAlloc.cpp:
+ (TrySbrk):
+ (TryMmap):
+ (TryVirtualAlloc):
+ (TryDevMem):
+ (TCMalloc_SystemAlloc):
+ * wtf/TCSystemAlloc.h:
+
2007-11-28 Brady Eidson <beidson@apple.com>
Reviewed by Geoff
// Metadata allocator -- keeps stats about how many bytes allocated
static uint64_t metadata_system_bytes = 0;
static void* MetaDataAlloc(size_t bytes) {
- void* result = TCMalloc_SystemAlloc(bytes);
+ void* result = TCMalloc_SystemAlloc(bytes, 0);
if (result != NULL) {
metadata_system_bytes += bytes;
}
bool TCMalloc_PageHeap::GrowHeap(Length n) {
ASSERT(kMaxPages >= kMinSystemAlloc);
Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
- void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, kPageSize);
+ void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, 0, kPageSize);
if (ptr == NULL) {
if (n < ask) {
// Try growing just "n" pages
ask = n;
- ptr = TCMalloc_SystemAlloc(ask << kPageShift, kPageSize);
+ ptr = TCMalloc_SystemAlloc(ask << kPageShift, 0, kPageSize);
}
if (ptr == NULL) return false;
}
//-------------------------------------------------------------------
void TCMalloc_Central_FreeList::Init(size_t cl) {
- lock_.Init();
size_class_ = cl;
DLL_Init(&empty_);
DLL_Init(&nonempty_);
-// Copyright (c) 2005, Google Inc.
+// Copyright (c) 2005, 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// The following is a struct so that it can be initialized at compile time
struct TCMalloc_SpinLock {
- volatile unsigned int private_lockword_;
- inline void Init() { private_lockword_ = 0; }
- inline void Finalize() { }
-
inline void Lock() {
int r;
#if COMPILER(GCC)
#if PLATFORM(X86)
__asm__ __volatile__
("xchgl %0, %1"
- : "=r"(r), "=m"(private_lockword_)
- : "0"(1), "m"(private_lockword_)
+ : "=r"(r), "=m"(lockword_)
+ : "0"(1), "m"(lockword_)
: "memory");
#else
- volatile unsigned int *lockword_ptr = &private_lockword_;
+ volatile unsigned int *lockword_ptr = &lockword_;
__asm__ __volatile__
("1: lwarx %0, 0, %1\n\t"
"stwcx. %2, 0, %1\n\t"
#endif
#elif COMPILER(MSVC)
__asm {
- mov eax, this ; store &private_lockword_ (which is this+0) in eax
+ mov eax, this ; store &lockword_ (which is this+0) in eax
mov ebx, 1 ; store 1 in ebx
- xchg [eax], ebx ; exchange private_lockword_ and 1
- mov r, ebx ; store old value of private_lockword_ in r
+ xchg [eax], ebx ; exchange lockword_ and 1
+ mov r, ebx ; store old value of lockword_ in r
}
#endif
- if (r) TCMalloc_SlowLock(&private_lockword_);
+ if (r) TCMalloc_SlowLock(&lockword_);
}
inline void Unlock() {
#if PLATFORM(X86)
__asm__ __volatile__
("movl $0, %0"
- : "=m"(private_lockword_)
- : "m" (private_lockword_)
+ : "=m"(lockword_)
+ : "m" (lockword_)
: "memory");
#else
__asm__ __volatile__
("isync\n\t"
"eieio\n\t"
"stw %1, %0"
- : "=o" (private_lockword_)
+ : "=o" (lockword_)
: "r" (0)
: "memory");
#endif
#elif COMPILER(MSVC)
__asm {
- mov eax, this ; store &private_lockword_ (which is this+0) in eax
- mov [eax], 0 ; set private_lockword_ to 0
+ mov eax, this ; store &lockword_ (which is this+0) in eax
+ mov [eax], 0 ; set lockword_ to 0
}
#endif
}
+ // Report if we think the lock can be held by this thread.
+ // When the lock is truly held by the invoking thread
+ // we will always return true.
+ // Intended to be used as CHECK(lock.IsHeld());
+ inline bool IsHeld() const {
+ return lockword_ != 0;
+ }
-#ifdef WTF_CHANGES
- inline bool IsLocked() {
- return private_lockword_ != 0;
- }
-#endif
+ volatile unsigned int lockword_;
};
#define SPINLOCK_INITIALIZER { 0 }
#if HAVE(SBRK)
-static void* TrySbrk(size_t size, size_t alignment) {
+static void* TrySbrk(size_t size, size_t *actual_size, size_t alignment) {
size = ((size + alignment - 1) / alignment) * alignment;
+
+ // could theoretically return the "extra" bytes here, but this
+ // is simple and correct.
+ if (actual_size)
+ *actual_size = size;
+
void* result = sbrk(size);
if (result == reinterpret_cast<void*>(-1)) {
sbrk_failure = true;
#if HAVE(MMAP)
-static void* TryMmap(size_t size, size_t alignment) {
+static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) {
// Enforce page alignment
if (pagesize == 0) pagesize = getpagesize();
if (alignment < pagesize) alignment = pagesize;
size = ((size + alignment - 1) / alignment) * alignment;
-
+
+ // could theoretically return the "extra" bytes here, but this
+ // is simple and correct.
+ if (actual_size)
+ *actual_size = size;
+
// Ask for extra memory if alignment > pagesize
size_t extra = 0;
if (alignment > pagesize) {
#if HAVE(VIRTUALALLOC)
-static void* TryVirtualAlloc(size_t size, size_t alignment) {
+static void* TryVirtualAlloc(size_t size, size_t *actual_size, size_t alignment) {
// Enforce page alignment
if (pagesize == 0) {
SYSTEM_INFO system_info;
GetSystemInfo(&system_info);
pagesize = system_info.dwPageSize;
}
+
if (alignment < pagesize) alignment = pagesize;
size = ((size + alignment - 1) / alignment) * alignment;
+ // could theoretically return the "extra" bytes here, but this
+ // is simple and correct.
+ if (actual_size)
+ *actual_size = size;
+
// Ask for extra memory if alignment > pagesize
size_t extra = 0;
if (alignment > pagesize) {
#endif /* HAVE(MMAP) */
#ifndef WTF_CHANGES
-static void* TryDevMem(size_t size, size_t alignment) {
+static void* TryDevMem(size_t size, size_t *actual_size, size_t alignment) {
static bool initialized = false;
static off_t physmem_base; // next physical memory address to allocate
static off_t physmem_limit; // maximum physical address allowed
if (pagesize == 0) pagesize = getpagesize();
if (alignment < pagesize) alignment = pagesize;
size = ((size + alignment - 1) / alignment) * alignment;
-
+
+ // could theoretically return the "extra" bytes here, but this
+ // is simple and correct.
+ if (actual_size)
+ *actual_size = size;
+
// Ask for extra memory if alignment > pagesize
size_t extra = 0;
if (alignment > pagesize) {
}
#endif
-void* TCMalloc_SystemAlloc(size_t size, size_t alignment) {
-#ifndef WTF_CHANGES
- if (TCMallocDebug::level >= TCMallocDebug::kVerbose) {
- MESSAGE("TCMalloc_SystemAlloc(%" PRIuS ", %" PRIuS")\n",
- size, alignment);
- }
-#endif
+void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) {
+ // Discard requests that overflow
+ if (size + alignment < size) return NULL;
+
SpinLockHolder lock_holder(&spinlock);
// Enforce minimum alignment
#ifndef WTF_CHANGES
if (use_devmem && !devmem_failure) {
- void* result = TryDevMem(size, alignment);
+ void* result = TryDevMem(size, actual_size, alignment);
if (result != NULL) return result;
}
#endif
#if HAVE(SBRK)
if (use_sbrk && !sbrk_failure) {
- void* result = TrySbrk(size, alignment);
+ void* result = TrySbrk(size, actual_size, alignment);
if (result != NULL) return result;
}
#endif
#if HAVE(MMAP)
if (use_mmap && !mmap_failure) {
- void* result = TryMmap(size, alignment);
+ void* result = TryMmap(size, actual_size, alignment);
if (result != NULL) return result;
}
#endif
#if HAVE(VIRTUALALLOC)
if (use_VirtualAlloc && !VirtualAlloc_failure) {
- void* result = TryVirtualAlloc(size, alignment);
+ void* result = TryVirtualAlloc(size, actual_size, alignment);
if (result != NULL) return result;
}
#endif
// REQUIRES: "alignment" is a power of two or "0" to indicate default alignment
//
-// Allocate and return "N" bytes of zeroed memory. The returned
-// pointer is a multiple of "alignment" if non-zero. Returns NULL
-// when out of memory.
-extern void* TCMalloc_SystemAlloc(size_t bytes, size_t alignment = 0);
+// Allocate and return "N" bytes of zeroed memory.
+//
+// If actual_bytes is NULL then the returned memory is exactly the
+// requested size. If actual_bytes is non-NULL then the allocator
+// may optionally return more bytes than asked for (i.e. return an
+// entire "huge" page if a huge page allocator is in use).
+//
+// The returned pointer is a multiple of "alignment" if non-zero.
+//
+// Returns NULL when out of memory.
+extern void* TCMalloc_SystemAlloc(size_t bytes, size_t *actual_bytes,
+ size_t alignment = 0);
+
+// This call is a hint to the operating system that the pages
+// contained in the specified range of memory will not be used for a
+// while, and can be released for use by other processes or the OS.
+// Pages which are released in this way may be destroyed (zeroed) by
+// the OS. The benefit of this function is that it frees memory for
+// use by the system, the cost is that the pages are faulted back into
+// the address space next time they are touched, which can impact
+// performance. (Only pages fully covered by the memory region will
+// be released, partial pages will not.)
+extern void TCMalloc_SystemRelease(void* start, size_t length);
#endif /* TCMALLOC_SYSTEM_ALLOC_H__ */