// Source/bmalloc/bmalloc/Heap.cpp
/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"

#include "AvailableMemory.h"
#include "BulkDecommit.h"
#include "BumpAllocator.h"
#include "Chunk.h"
#include "CryptoRandom.h"
#include "Environment.h"
#include "Gigacage.h"
#include "DebugHeap.h"
#include "PerProcess.h"
#include "Scavenger.h"
#include "SmallLine.h"
#include "SmallPage.h"
#include "VMHeap.h"
#include "bmalloc.h"
#include <thread>
#include <vector>

namespace bmalloc {

static_assert(isPowerOfTwo(smallPageSize), "");

Heap::Heap(HeapKind kind, std::lock_guard<Mutex>&)
    : m_kind(kind)
    , m_vmPageSizePhysical(vmPageSizePhysical())
    , m_debugHeap(nullptr)
{
    RELEASE_BASSERT(vmPageSizePhysical() >= smallPageSize);
    RELEASE_BASSERT(vmPageSize() >= vmPageSizePhysical());

    initializeLineMetadata();
    initializePageMetadata();

    if (PerProcess<Environment>::get()->isDebugHeapEnabled())
        m_debugHeap = PerProcess<DebugHeap>::get();
    else {
        Gigacage::ensureGigacage();
#if GIGACAGE_ENABLED
        if (usingGigacage()) {
            RELEASE_BASSERT(gigacageBasePtr());
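            // Slide the usable region within the gigacage: shrink the size by a
            // random amount (bounded by maximumCageSizeReductionForSlide) and start
            // at a random page-aligned offset, so that large allocations land at a
            // less predictable base address.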
            uint64_t random[2];
            cryptoRandom(reinterpret_cast<unsigned char*>(random), sizeof(random));
            size_t size = roundDownToMultipleOf(vmPageSize(), gigacageSize() - (random[0] % Gigacage::maximumCageSizeReductionForSlide));
            ptrdiff_t offset = roundDownToMultipleOf(vmPageSize(), random[1] % (gigacageSize() - size));
            void* base = reinterpret_cast<unsigned char*>(gigacageBasePtr()) + offset;
            m_largeFree.add(LargeRange(base, size, 0, 0));
        }
#endif
    }

    m_scavenger = PerProcess<Scavenger>::get();
}

bool Heap::usingGigacage()
{
    return isGigacage(m_kind) && gigacageBasePtr();
}

void* Heap::gigacageBasePtr()
{
    return Gigacage::basePtr(gigacageKind(m_kind));
}

size_t Heap::gigacageSize()
{
    return Gigacage::size(gigacageKind(m_kind));
}

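// For each small size class, precompute per-line metadata for a small page: the
// offset at which the first object begins in each line, and how many objects
// begin there. allocateSmallBumpRangesByMetadata() walks this table to build bump
// ranges without inspecting individual objects.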
void Heap::initializeLineMetadata()
{
    size_t sizeClassCount = bmalloc::sizeClass(smallLineSize);
    size_t smallLineCount = smallPageSize / smallLineSize;
    m_smallLineMetadata.grow(sizeClassCount * smallLineCount);

    for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
        size_t size = objectSize(sizeClass);
        LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

        size_t object = 0;
        size_t line = 0;
        while (object < smallPageSize) {
            line = object / smallLineSize;
            size_t leftover = object % smallLineSize;

            size_t objectCount;
            size_t remainder;
            divideRoundingUp(smallLineSize - leftover, size, objectCount, remainder);

            pageMetadata[line] = { static_cast<unsigned char>(leftover), static_cast<unsigned char>(objectCount) };

            object += objectCount * size;
        }

        // Don't allow the last object in a page to escape the page.
        if (object > smallPageSize) {
            BASSERT(pageMetadata[line].objectCount);
            --pageMetadata[line].objectCount;
        }
    }
}

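// Choose a page size for each size class. Classes smaller than a line always use
// smallPageSize; larger classes pick the smallest page size that wastes at most
// 1/pageSizeWasteFactor of the page (power-of-2 sizes up to the physical page
// size, then multiples of it up to pageSizeMax). The result is recorded as a
// page class, a multiple of smallPageSize.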
void Heap::initializePageMetadata()
{
    auto computePageSize = [&](size_t sizeClass) {
        size_t size = objectSize(sizeClass);
        if (sizeClass < bmalloc::sizeClass(smallLineSize))
            return smallPageSize;

        // We want power-of-2 page sizes below the physical page size and multiples of the physical page size above that.
        size_t pageSize = smallPageSize;
        for (; pageSize < m_vmPageSizePhysical; pageSize *= 2) {
            RELEASE_BASSERT(pageSize <= chunkSize / 2);
            size_t waste = pageSize % size;
            if (waste <= pageSize / pageSizeWasteFactor)
                return pageSize;
        }

        for (; pageSize < pageSizeMax; pageSize += m_vmPageSizePhysical) {
            RELEASE_BASSERT(pageSize <= chunkSize / 2);
            size_t waste = pageSize % size;
            if (waste <= pageSize / pageSizeWasteFactor)
                return pageSize;
        }

        return pageSizeMax;
    };

    for (size_t i = 0; i < sizeClassCount; ++i)
        m_pageClasses[i] = (computePageSize(i) - 1) / smallPageSize;
}

size_t Heap::freeableMemory(std::lock_guard<Mutex>&)
{
    return m_freeableMemory;
}

size_t Heap::footprint()
{
    BASSERT(!m_debugHeap);
    return m_footprint;
}

void Heap::markAllLargeAsEligibile(std::lock_guard<Mutex>&)
{
    m_largeFree.markAllAsEligibile();
    m_hasPendingDecommits = false;
    m_condition.notify_all();
}

void Heap::decommitLargeRange(std::lock_guard<Mutex>&, LargeRange& range, BulkDecommit& decommitter)
{
    m_footprint -= range.totalPhysicalSize();
    m_freeableMemory -= range.totalPhysicalSize();
    decommitter.addLazy(range.begin(), range.size());
    m_hasPendingDecommits = true;
    range.setStartPhysicalSize(0);
    range.setTotalPhysicalSize(0);
    BASSERT(range.isEligibile());
    range.setEligible(false);
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.decommit(range.begin(), range.size());
#endif
}

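// Full scavenge: eagerly decommit the physical memory behind every free small
// page, return all cached chunks to the large free list, and lazily decommit
// every free large range.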
void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
{
    for (auto& list : m_freePages) {
        for (auto* chunk : list) {
            for (auto* page : chunk->freePages()) {
                if (!page->hasPhysicalPages())
                    continue;

                size_t pageSize = bmalloc::pageSize(&list - &m_freePages[0]);
                if (pageSize >= m_vmPageSizePhysical) {
                    size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
                    m_freeableMemory -= decommitSize;
                    m_footprint -= decommitSize;
                    decommitter.addEager(page->begin()->begin(), pageSize);
                    page->setHasPhysicalPages(false);
#if ENABLE_PHYSICAL_PAGE_MAP
                    m_physicalPageMap.decommit(page->begin()->begin(), pageSize);
#endif
                } else
                    tryDecommitSmallPagesInPhysicalPage(lock, decommitter, page, pageSize);
            }
        }
    }

    for (auto& list : m_chunkCache) {
        while (!list.isEmpty())
            deallocateSmallChunk(list.pop(), &list - &m_chunkCache[0]);
    }

    for (LargeRange& range : m_largeFree) {
        m_highWatermark = std::min(m_highWatermark, static_cast<void*>(range.begin()));
        decommitLargeRange(lock, range, decommitter);
    }

    m_freeableMemory = 0;
}

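// Partial scavenge: only decommit free large ranges above the high watermark
// (roughly, the highest address recently returned by a large allocation),
// keeping lower ranges committed for quick reuse.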
void Heap::scavengeToHighWatermark(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
{
    void* newHighWaterMark = nullptr;
    for (LargeRange& range : m_largeFree) {
        if (range.begin() <= m_highWatermark)
            newHighWaterMark = std::min(newHighWaterMark, static_cast<void*>(range.begin()));
        else
            decommitLargeRange(lock, range, decommitter);
    }
    m_highWatermark = newHighWaterMark;
}

void Heap::deallocateLineCache(std::unique_lock<Mutex>&, LineCache& lineCache)
{
    for (auto& list : lineCache) {
        while (!list.isEmpty()) {
            size_t sizeClass = &list - &lineCache[0];
            m_lineCache[sizeClass].push(list.popFront());
        }
    }
}

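// Make a chunk of small pages available for this page class: reuse a chunk from
// the chunk cache if possible; otherwise carve a new Chunk out of a large
// allocation and push all of its pages onto the chunk's free-page list.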
void Heap::allocateSmallChunk(std::unique_lock<Mutex>& lock, size_t pageClass)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    size_t pageSize = bmalloc::pageSize(pageClass);

    Chunk* chunk = [&]() {
        if (!m_chunkCache[pageClass].isEmpty())
            return m_chunkCache[pageClass].pop();

        void* memory = allocateLarge(lock, chunkSize, chunkSize);

        Chunk* chunk = new (memory) Chunk(pageSize);

        m_objectTypes.set(chunk, ObjectType::Small);

        forEachPage(chunk, pageSize, [&](SmallPage* page) {
            page->setHasPhysicalPages(true);
            page->setHasFreeLines(lock, true);
            chunk->freePages().push(page);
        });

        m_freeableMemory += chunkSize;

        m_scavenger->schedule(0);

        return chunk;
    }();

    m_freePages[pageClass].push(chunk);
}

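// When the bmalloc page size is smaller than the physical page size, a physical
// page can only be decommitted once every small page within it is unreferenced.
// Only the first small page in the physical page performs the check and, if it
// succeeds, the decommit.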
void Heap::tryDecommitSmallPagesInPhysicalPage(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, SmallPage* smallPage, size_t pageSize)
{
    Chunk* chunk = Chunk::get(smallPage);

    char* pageBegin = smallPage->begin()->begin();
    char* physicalPageBegin = roundDownToMultipleOf(m_vmPageSizePhysical, pageBegin);

    // The first page in a physical page takes care of decommitting its physical neighbors.
    if (pageBegin != physicalPageBegin)
        return;

    size_t beginPageOffset = chunk->offset(physicalPageBegin);
    size_t endPageOffset = beginPageOffset + m_vmPageSizePhysical;

    Object begin(chunk, beginPageOffset);
    Object end(chunk, endPageOffset);

    for (auto it = begin; it + pageSize <= end; it = it + pageSize) {
        if (it.page()->refCount(lock))
            return;
    }

    size_t decommitSize = m_vmPageSizePhysical;
    m_freeableMemory -= decommitSize;
    m_footprint -= decommitSize;

    decommitter.addEager(physicalPageBegin, decommitSize);

    for (auto it = begin; it + pageSize <= end; it = it + pageSize)
        it.page()->setHasPhysicalPages(false);
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.decommit(smallPage, decommitSize);
#endif
}

void Heap::commitSmallPagesInPhysicalPage(std::unique_lock<Mutex>&, SmallPage* page, size_t pageSize)
{
    Chunk* chunk = Chunk::get(page);

    char* physicalPageBegin = roundDownToMultipleOf(m_vmPageSizePhysical, page->begin()->begin());

    size_t beginPageOffset = chunk->offset(physicalPageBegin);
    size_t endPageOffset = beginPageOffset + m_vmPageSizePhysical;

    Object begin(chunk, beginPageOffset);
    Object end(chunk, endPageOffset);

    m_footprint += m_vmPageSizePhysical;
    vmAllocatePhysicalPagesSloppy(physicalPageBegin, m_vmPageSizePhysical);

    for (auto it = begin; it + pageSize <= end; it = it + pageSize)
        it.page()->setHasPhysicalPages(true);
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.commit(begin.page(), m_vmPageSizePhysical);
#endif
}

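// Return a completely free small chunk to the large free list, flipping its
// object type back to Large. The pages that are still committed are tallied so
// the resulting LargeRange carries accurate physical-size accounting.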
void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
{
    m_objectTypes.set(chunk, ObjectType::Large);

    size_t size = m_largeAllocated.remove(chunk);
    size_t totalPhysicalSize = size;

    size_t accountedInFreeable = 0;

    bool hasPhysicalPages = true;
    forEachPage(chunk, pageSize(pageClass), [&](SmallPage* page) {
        size_t physicalSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize(pageClass));
        if (!page->hasPhysicalPages()) {
            totalPhysicalSize -= physicalSize;
            hasPhysicalPages = false;
        } else
            accountedInFreeable += physicalSize;
    });

    m_freeableMemory -= accountedInFreeable;
    m_freeableMemory += totalPhysicalSize;

    size_t startPhysicalSize = hasPhysicalPages ? size : 0;
    m_largeFree.add(LargeRange(chunk, size, startPhysicalSize, totalPhysicalSize));
}

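// Fast paths first: reuse a page from the caller's line cache or the heap's line
// cache. Otherwise take a free page from a chunk, allocating a new chunk if
// necessary, and recommit the page's physical memory if it was decommitted.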
SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeClass, LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    if (!lineCache[sizeClass].isEmpty())
        return lineCache[sizeClass].popFront();

    if (!m_lineCache[sizeClass].isEmpty())
        return m_lineCache[sizeClass].popFront();

    m_scavenger->didStartGrowing();

    SmallPage* page = [&]() {
        size_t pageClass = m_pageClasses[sizeClass];

        if (m_freePages[pageClass].isEmpty())
            allocateSmallChunk(lock, pageClass);

        Chunk* chunk = m_freePages[pageClass].tail();

        chunk->ref();

        SmallPage* page = chunk->freePages().pop();
        if (chunk->freePages().isEmpty())
            m_freePages[pageClass].remove(chunk);

        size_t pageSize = bmalloc::pageSize(pageClass);
        size_t physicalSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
        if (page->hasPhysicalPages())
            m_freeableMemory -= physicalSize;
        else {
            m_scavenger->scheduleIfUnderMemoryPressure(pageSize);
            if (pageSize >= m_vmPageSizePhysical) {
                m_footprint += physicalSize;
                vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize);
                page->setHasPhysicalPages(true);
#if ENABLE_PHYSICAL_PAGE_MAP
                m_physicalPageMap.commit(page->begin()->begin(), pageSize);
#endif
            } else
                commitSmallPagesInPhysicalPage(lock, page, pageSize);
        }

        return page;
    }();

    page->setSizeClass(sizeClass);
    return page;
}

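// Called when a small line's refcount drops to zero: the line returns to its
// page, the page returns to its chunk once the page is empty, an empty chunk is
// cached (or released to the large free list), and the scavenger is scheduled.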
void Heap::deallocateSmallLine(std::unique_lock<Mutex>& lock, Object object, LineCache& lineCache)
{
    BASSERT(!object.line()->refCount(lock));
    SmallPage* page = object.page();
    page->deref(lock);

    if (!page->hasFreeLines(lock)) {
        page->setHasFreeLines(lock, true);
        lineCache[page->sizeClass()].push(page);
    }

    if (page->refCount(lock))
        return;

    size_t sizeClass = page->sizeClass();
    size_t pageClass = m_pageClasses[sizeClass];

    m_freeableMemory += physicalPageSizeSloppy(page->begin()->begin(), pageSize(pageClass));

    List<SmallPage>::remove(page); // 'page' may be in any thread's line cache.

    Chunk* chunk = Chunk::get(page);
    if (chunk->freePages().isEmpty())
        m_freePages[pageClass].push(chunk);
    chunk->freePages().push(page);

    chunk->deref();

    if (!chunk->refCount()) {
        m_freePages[pageClass].remove(chunk);

        if (!m_chunkCache[pageClass].isEmpty())
            deallocateSmallChunk(m_chunkCache[pageClass].pop(), pageClass);

        m_chunkCache[pageClass].push(chunk);
    }

    m_scavenger->schedule(pageSize(pageClass));
}

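// Used for size classes no larger than a line. Scans the precomputed line
// metadata for runs of unreferenced lines and turns each run into a BumpRange,
// refilling the allocator first and then topping up the range cache.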
void Heap::allocateSmallBumpRangesByMetadata(
    std::unique_lock<Mutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    SmallLine* lines = page->begin();
    BASSERT(page->hasFreeLines(lock));
    size_t smallLineCount = smallPageSize / smallLineSize;
    LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

    auto findSmallBumpRange = [&](size_t& lineNumber) {
        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (!lines[lineNumber].refCount(lock)) {
                if (pageMetadata[lineNumber].objectCount)
                    return true;
            }
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](size_t& lineNumber) -> BumpRange {
        char* begin = lines[lineNumber].begin() + pageMetadata[lineNumber].startOffset;
        unsigned short objectCount = 0;

        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (lines[lineNumber].refCount(lock))
                break;

            if (!pageMetadata[lineNumber].objectCount)
                continue;

            objectCount += pageMetadata[lineNumber].objectCount;
            lines[lineNumber].ref(lock, pageMetadata[lineNumber].objectCount);
            page->ref(lock);
        }
        return { begin, objectCount };
    };

    size_t lineNumber = 0;
    for (;;) {
        if (!findSmallBumpRange(lineNumber)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(lineNumber);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

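// Used for size classes larger than a line, where objects span multiple lines.
// Walks the page in object-sized steps, testing the refcount of each object's
// first line, and builds BumpRanges from runs of free objects.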
void Heap::allocateSmallBumpRangesByObject(
    std::unique_lock<Mutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    size_t size = allocator.size();
    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    BASSERT(page->hasFreeLines(lock));

    auto findSmallBumpRange = [&](Object& it, Object& end) {
        for ( ; it + size <= end; it = it + size) {
            if (!it.line()->refCount(lock))
                return true;
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](Object& it, Object& end) -> BumpRange {
        char* begin = it.address();
        unsigned short objectCount = 0;
        for ( ; it + size <= end; it = it + size) {
            if (it.line()->refCount(lock))
                break;

            ++objectCount;
            it.line()->ref(lock);
            it.page()->ref(lock);
        }
        return { begin, objectCount };
    };

    Object it(page->begin()->begin());
    Object end(it + pageSize(m_pageClasses[sizeClass]));
    for (;;) {
        if (!findSmallBumpRange(it, end)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(it, end);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

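// Trim a free large range to the requested alignment and size: split any
// misaligned prefix and any oversized suffix back to the free list, commit
// whatever part of the remaining range lacks physical pages, and record the
// allocation in m_largeAllocated.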
LargeRange Heap::splitAndAllocate(std::unique_lock<Mutex>&, LargeRange& range, size_t alignment, size_t size)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    LargeRange prev;
    LargeRange next;

    size_t alignmentMask = alignment - 1;
    if (test(range.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, range.begin()) - range.begin();
        std::pair<LargeRange, LargeRange> pair = range.split(prefixSize);
        prev = pair.first;
        range = pair.second;
    }

    if (range.size() - size > size / pageSizeWasteFactor) {
        std::pair<LargeRange, LargeRange> pair = range.split(size);
        range = pair.first;
        next = pair.second;
    }

    if (range.startPhysicalSize() < range.size()) {
        m_scavenger->scheduleIfUnderMemoryPressure(range.size());
        m_footprint += range.size() - range.totalPhysicalSize();
        vmAllocatePhysicalPagesSloppy(range.begin() + range.startPhysicalSize(), range.size() - range.startPhysicalSize());
        range.setStartPhysicalSize(range.size());
        range.setTotalPhysicalSize(range.size());
#if ENABLE_PHYSICAL_PAGE_MAP
        m_physicalPageMap.commit(range.begin(), range.size());
#endif
    }

    if (prev) {
        m_freeableMemory += prev.totalPhysicalSize();
        m_largeFree.add(prev);
    }

    if (next) {
        m_freeableMemory += next.totalPhysicalSize();
        m_largeFree.add(next);
    }

    m_objectTypes.set(Chunk::get(range.begin()), ObjectType::Large);

    m_largeAllocated.set(range.begin(), range.size());
    return range;
}

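// Rounds size and alignment up to largeAlignment and satisfies the request from
// the large free list. If nothing fits while decommits are still pending, waits
// for the scavenger to finish and retries; otherwise grows the heap through
// VMHeap, unless this heap is confined to a gigacage.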
void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    BASSERT(isPowerOfTwo(alignment));

    if (m_debugHeap)
        return m_debugHeap->memalignLarge(alignment, size);

    m_scavenger->didStartGrowing();

    size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
    if (roundedSize < size) // Check for overflow
        return nullptr;
    size = roundedSize;

    size_t roundedAlignment = roundUpToMultipleOf<largeAlignment>(alignment);
    if (roundedAlignment < alignment) // Check for overflow
        return nullptr;
    alignment = roundedAlignment;

    LargeRange range = m_largeFree.remove(alignment, size);
    if (!range) {
        if (m_hasPendingDecommits) {
            m_condition.wait(lock, [&]() { return !m_hasPendingDecommits; });
            // Now we're guaranteed we're looking at all available memory.
            return tryAllocateLarge(lock, alignment, size);
        }

        if (usingGigacage())
            return nullptr;

        range = PerProcess<VMHeap>::get()->tryAllocateLargeChunk(alignment, size);
        if (!range)
            return nullptr;

        m_largeFree.add(range);
        range = m_largeFree.remove(alignment, size);
    }

    m_freeableMemory -= range.totalPhysicalSize();

    void* result = splitAndAllocate(lock, range, alignment, size).begin();
    m_highWatermark = std::max(m_highWatermark, result);
    return result;
}

void* Heap::allocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
{
    void* result = tryAllocateLarge(lock, alignment, size);
    RELEASE_BASSERT(result);
    return result;
}

bool Heap::isLarge(std::unique_lock<Mutex>&, void* object)
{
    return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large;
}

size_t Heap::largeSize(std::unique_lock<Mutex>&, void* object)
{
    return m_largeAllocated.get(object);
}

void Heap::shrinkLarge(std::unique_lock<Mutex>& lock, const Range& object, size_t newSize)
{
    BASSERT(object.size() > newSize);

    size_t size = m_largeAllocated.remove(object.begin());
    LargeRange range = LargeRange(object, size, size);
    splitAndAllocate(lock, range, alignment, newSize);

    m_scavenger->schedule(size);
}

void Heap::deallocateLarge(std::unique_lock<Mutex>&, void* object)
{
    if (m_debugHeap)
        return m_debugHeap->freeLarge(object);

    size_t size = m_largeAllocated.remove(object);
    m_largeFree.add(LargeRange(object, size, size, size));
    m_freeableMemory += size;
    m_scavenger->schedule(size);
}

void Heap::externalCommit(void* ptr, size_t size)
{
    std::unique_lock<Mutex> lock(Heap::mutex());
    externalCommit(lock, ptr, size);
}

void Heap::externalCommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
{
    BUNUSED_PARAM(ptr);

    m_footprint += size;
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.commit(ptr, size);
#endif
}

void Heap::externalDecommit(void* ptr, size_t size)
{
    std::unique_lock<Mutex> lock(Heap::mutex());
    externalDecommit(lock, ptr, size);
}

void Heap::externalDecommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
{
    BUNUSED_PARAM(ptr);

    m_footprint -= size;
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.decommit(ptr, size);
#endif
}

} // namespace bmalloc