[BMalloc] Scavenger should react to recent memory activity
[WebKit-https.git] Source/bmalloc/bmalloc/Heap.cpp
/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"

#include "AvailableMemory.h"
#include "BulkDecommit.h"
#include "BumpAllocator.h"
#include "Chunk.h"
#include "CryptoRandom.h"
#include "Environment.h"
#include "Gigacage.h"
#include "DebugHeap.h"
#include "PerProcess.h"
#include "Scavenger.h"
#include "SmallLine.h"
#include "SmallPage.h"
#include "VMHeap.h"
#include "bmalloc.h"
#include <thread>
#include <vector>

namespace bmalloc {

Heap::Heap(HeapKind kind, std::lock_guard<Mutex>&)
    : m_kind(kind)
    , m_vmPageSizePhysical(vmPageSizePhysical())
{
    RELEASE_BASSERT(vmPageSizePhysical() >= smallPageSize);
    RELEASE_BASSERT(vmPageSize() >= vmPageSizePhysical());

    initializeLineMetadata();
    initializePageMetadata();

    BASSERT(!Environment::get()->isDebugHeapEnabled());

    Gigacage::ensureGigacage();
#if GIGACAGE_ENABLED
    if (usingGigacage()) {
        RELEASE_BASSERT(gigacageBasePtr());
        uint64_t random[2];
        cryptoRandom(reinterpret_cast<unsigned char*>(random), sizeof(random));
        size_t size = roundDownToMultipleOf(vmPageSize(), gigacageSize() - (random[0] % Gigacage::maximumCageSizeReductionForSlide));
        ptrdiff_t offset = roundDownToMultipleOf(vmPageSize(), random[1] % (gigacageSize() - size));
        void* base = reinterpret_cast<unsigned char*>(gigacageBasePtr()) + offset;
        m_largeFree.add(LargeRange(base, size, 0, 0));
    }
#endif

    m_scavenger = Scavenger::get();
}

bool Heap::usingGigacage()
{
    return isGigacage(m_kind) && gigacageBasePtr();
}

void* Heap::gigacageBasePtr()
{
    return Gigacage::basePtr(gigacageKind(m_kind));
}

size_t Heap::gigacageSize()
{
    return Gigacage::size(gigacageKind(m_kind));
}

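// For each size class that fits within a small line, precompute per-line metadata
// for a page: the offset at which the first object begins in the line (startOffset)
// and how many objects begin in that line (objectCount).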
void Heap::initializeLineMetadata()
{
    size_t sizeClassCount = bmalloc::sizeClass(smallLineSize);
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    m_smallLineMetadata.grow(sizeClassCount * smallLineCount);

    for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
        size_t size = objectSize(sizeClass);
        LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

        size_t object = 0;
        size_t line = 0;
        while (object < m_vmPageSizePhysical) {
            line = object / smallLineSize;
            size_t leftover = object % smallLineSize;

            size_t objectCount;
            size_t remainder;
            divideRoundingUp(smallLineSize - leftover, size, objectCount, remainder);

            pageMetadata[line] = { static_cast<unsigned char>(leftover), static_cast<unsigned char>(objectCount) };

            object += objectCount * size;
        }

        // Don't allow the last object in a page to escape the page.
        if (object > m_vmPageSizePhysical) {
            BASSERT(pageMetadata[line].objectCount);
            --pageMetadata[line].objectCount;
        }
    }
}

void Heap::initializePageMetadata()
{
    auto computePageSize = [&](size_t sizeClass) {
        size_t size = objectSize(sizeClass);
        if (sizeClass < bmalloc::sizeClass(smallLineSize))
            return m_vmPageSizePhysical;

        for (size_t pageSize = m_vmPageSizePhysical;
            pageSize < pageSizeMax;
            pageSize += m_vmPageSizePhysical) {
            RELEASE_BASSERT(pageSize <= chunkSize / 2);
            size_t waste = pageSize % size;
            if (waste <= pageSize / pageSizeWasteFactor)
                return pageSize;
        }

        return pageSizeMax;
    };

    for (size_t i = 0; i < sizeClassCount; ++i)
        m_pageClasses[i] = (computePageSize(i) - 1) / smallPageSize;
}

size_t Heap::freeableMemory(std::lock_guard<Mutex>&)
{
    return m_freeableMemory;
}

size_t Heap::footprint()
{
    return m_footprint;
}

void Heap::markAllLargeAsEligibile(std::lock_guard<Mutex>&)
{
    m_largeFree.markAllAsEligibile();
    m_hasPendingDecommits = false;
    m_condition.notify_all();
}

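// Queues a free large range for lazy decommit through the BulkDecommit batch.
// The range is marked ineligible so it cannot be handed out again until the
// scavenger finishes the decommit and calls markAllLargeAsEligibile().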
void Heap::decommitLargeRange(std::lock_guard<Mutex>&, LargeRange& range, BulkDecommit& decommitter)
{
    m_footprint -= range.totalPhysicalSize();
    m_freeableMemory -= range.totalPhysicalSize();
    decommitter.addLazy(range.begin(), range.size());
    m_hasPendingDecommits = true;
    range.setStartPhysicalSize(0);
    range.setTotalPhysicalSize(0);
    BASSERT(range.isEligibile());
    range.setEligible(false);
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.decommit(range.begin(), range.size());
#endif
}

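// Decommits free memory back to the OS. Any free page, cached chunk, or free
// large range that has been used since the last scavenge is skipped this round
// and counted in deferredDecommits, so the scavenger reacts to recent memory
// activity instead of decommitting memory that is being actively recycled.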
void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, size_t& deferredDecommits)
{
    for (auto& list : m_freePages) {
        for (auto* chunk : list) {
            for (auto* page : chunk->freePages()) {
                if (!page->hasPhysicalPages())
                    continue;
                if (page->usedSinceLastScavenge()) {
                    page->clearUsedSinceLastScavenge();
                    deferredDecommits++;
                    continue;
                }

                size_t pageSize = bmalloc::pageSize(&list - &m_freePages[0]);
                size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
                m_freeableMemory -= decommitSize;
                m_footprint -= decommitSize;
                decommitter.addEager(page->begin()->begin(), pageSize);
                page->setHasPhysicalPages(false);
#if ENABLE_PHYSICAL_PAGE_MAP
                m_physicalPageMap.decommit(page->begin()->begin(), pageSize);
#endif
            }
        }
    }

    for (auto& list : m_chunkCache) {
        for (auto iter = list.begin(); iter != list.end(); ) {
            Chunk* chunk = *iter;
            if (chunk->usedSinceLastScavenge()) {
                chunk->clearUsedSinceLastScavenge();
                deferredDecommits++;
                ++iter;
                continue;
            }
            ++iter;
            list.remove(chunk);
            deallocateSmallChunk(chunk, &list - &m_chunkCache[0]);
        }
    }

    for (LargeRange& range : m_largeFree) {
        if (range.usedSinceLastScavenge()) {
            range.clearUsedSinceLastScavenge();
            deferredDecommits++;
            continue;
        }
        decommitLargeRange(lock, range, decommitter);
    }
}

void Heap::deallocateLineCache(std::unique_lock<Mutex>&, LineCache& lineCache)
{
    for (auto& list : lineCache) {
        while (!list.isEmpty()) {
            size_t sizeClass = &list - &lineCache[0];
            m_lineCache[sizeClass].push(list.popFront());
        }
    }
}

void Heap::allocateSmallChunk(std::unique_lock<Mutex>& lock, size_t pageClass)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    size_t pageSize = bmalloc::pageSize(pageClass);

    Chunk* chunk = [&]() {
        if (!m_chunkCache[pageClass].isEmpty())
            return m_chunkCache[pageClass].pop();

        void* memory = allocateLarge(lock, chunkSize, chunkSize);

        Chunk* chunk = new (memory) Chunk(pageSize);

        m_objectTypes.set(chunk, ObjectType::Small);

        forEachPage(chunk, pageSize, [&](SmallPage* page) {
            page->setHasPhysicalPages(true);
            page->setUsedSinceLastScavenge();
            page->setHasFreeLines(lock, true);
            chunk->freePages().push(page);
        });

        m_freeableMemory += chunkSize;

        m_scavenger->schedule(0);

        return chunk;
    }();

    m_freePages[pageClass].push(chunk);
}

void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
{
    m_objectTypes.set(chunk, ObjectType::Large);

    size_t size = m_largeAllocated.remove(chunk);
    size_t totalPhysicalSize = size;

    size_t accountedInFreeable = 0;

    bool hasPhysicalPages = true;
    forEachPage(chunk, pageSize(pageClass), [&](SmallPage* page) {
        size_t physicalSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize(pageClass));
        if (!page->hasPhysicalPages()) {
            totalPhysicalSize -= physicalSize;
            hasPhysicalPages = false;
        } else
            accountedInFreeable += physicalSize;
    });

    m_freeableMemory -= accountedInFreeable;
    m_freeableMemory += totalPhysicalSize;

    size_t startPhysicalSize = hasPhysicalPages ? size : 0;
    m_largeFree.add(LargeRange(chunk, size, startPhysicalSize, totalPhysicalSize));
}

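// Returns a page with free lines, preferring the caller's line cache, then the
// heap's line cache, and finally a page taken from a free chunk (allocating a
// new chunk if none is available). Pages and chunks handed out here are marked
// used-since-last-scavenge so the next scavenge defers them.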
SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeClass, LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    if (!lineCache[sizeClass].isEmpty())
        return lineCache[sizeClass].popFront();

    if (!m_lineCache[sizeClass].isEmpty())
        return m_lineCache[sizeClass].popFront();

    m_scavenger->didStartGrowing();

    SmallPage* page = [&]() {
        size_t pageClass = m_pageClasses[sizeClass];

        if (m_freePages[pageClass].isEmpty())
            allocateSmallChunk(lock, pageClass);

        Chunk* chunk = m_freePages[pageClass].tail();

        chunk->ref();
        chunk->setUsedSinceLastScavenge();

        SmallPage* page = chunk->freePages().pop();
        if (chunk->freePages().isEmpty())
            m_freePages[pageClass].remove(chunk);

        size_t pageSize = bmalloc::pageSize(pageClass);
        size_t physicalSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
        if (page->hasPhysicalPages())
            m_freeableMemory -= physicalSize;
        else {
            m_scavenger->scheduleIfUnderMemoryPressure(pageSize);
            m_footprint += physicalSize;
            vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize);
            page->setHasPhysicalPages(true);
#if ENABLE_PHYSICAL_PAGE_MAP
            m_physicalPageMap.commit(page->begin()->begin(), pageSize);
#endif
        }
        page->setUsedSinceLastScavenge();

        return page;
    }();

    page->setSizeClass(sizeClass);
    return page;
}

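// Returns a freed line to its page. A page whose reference count drops to zero
// goes back onto its chunk's free list; a chunk whose reference count drops to
// zero is moved into the chunk cache (deallocating the previously cached chunk,
// if any). A scavenge is then scheduled.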
void Heap::deallocateSmallLine(std::unique_lock<Mutex>& lock, Object object, LineCache& lineCache)
{
    BASSERT(!object.line()->refCount(lock));
    SmallPage* page = object.page();
    page->deref(lock);

    if (!page->hasFreeLines(lock)) {
        page->setHasFreeLines(lock, true);
        lineCache[page->sizeClass()].push(page);
    }

    if (page->refCount(lock))
        return;

    size_t sizeClass = page->sizeClass();
    size_t pageClass = m_pageClasses[sizeClass];

    m_freeableMemory += physicalPageSizeSloppy(page->begin()->begin(), pageSize(pageClass));

    List<SmallPage>::remove(page); // 'page' may be in any thread's line cache.

    Chunk* chunk = Chunk::get(page);
    if (chunk->freePages().isEmpty())
        m_freePages[pageClass].push(chunk);
    chunk->freePages().push(page);

    chunk->deref();

    if (!chunk->refCount()) {
        m_freePages[pageClass].remove(chunk);

        if (!m_chunkCache[pageClass].isEmpty())
            deallocateSmallChunk(m_chunkCache[pageClass].pop(), pageClass);

        m_chunkCache[pageClass].push(chunk);
    }

    m_scavenger->schedule(pageSize(pageClass));
}

void Heap::allocateSmallBumpRangesByMetadata(
    std::unique_lock<Mutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    SmallLine* lines = page->begin();
    BASSERT(page->hasFreeLines(lock));
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

    auto findSmallBumpRange = [&](size_t& lineNumber) {
        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (!lines[lineNumber].refCount(lock)) {
                if (pageMetadata[lineNumber].objectCount)
                    return true;
            }
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](size_t& lineNumber) -> BumpRange {
        char* begin = lines[lineNumber].begin() + pageMetadata[lineNumber].startOffset;
        unsigned short objectCount = 0;

        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (lines[lineNumber].refCount(lock))
                break;

            if (!pageMetadata[lineNumber].objectCount)
                continue;

            objectCount += pageMetadata[lineNumber].objectCount;
            lines[lineNumber].ref(lock, pageMetadata[lineNumber].objectCount);
            page->ref(lock);
        }
        return { begin, objectCount };
    };

    size_t lineNumber = 0;
    for (;;) {
        if (!findSmallBumpRange(lineNumber)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(lineNumber);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

void Heap::allocateSmallBumpRangesByObject(
    std::unique_lock<Mutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    size_t size = allocator.size();
    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    BASSERT(page->hasFreeLines(lock));

    auto findSmallBumpRange = [&](Object& it, Object& end) {
        for ( ; it + size <= end; it = it + size) {
            if (!it.line()->refCount(lock))
                return true;
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](Object& it, Object& end) -> BumpRange {
        char* begin = it.address();
        unsigned short objectCount = 0;
        for ( ; it + size <= end; it = it + size) {
            if (it.line()->refCount(lock))
                break;

            ++objectCount;
            it.line()->ref(lock);
            it.page()->ref(lock);
        }
        return { begin, objectCount };
    };

    Object it(page->begin()->begin());
    Object end(it + pageSize(m_pageClasses[sizeClass]));
    for (;;) {
        if (!findSmallBumpRange(it, end)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(it, end);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

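// Carves an aligned allocation of the requested size out of 'range': any
// misaligned prefix and oversized suffix are split off and returned to the free
// list, and any physical pages the allocated portion is missing are committed.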
LargeRange Heap::splitAndAllocate(std::unique_lock<Mutex>&, LargeRange& range, size_t alignment, size_t size)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    LargeRange prev;
    LargeRange next;

    size_t alignmentMask = alignment - 1;
    if (test(range.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, range.begin()) - range.begin();
        std::pair<LargeRange, LargeRange> pair = range.split(prefixSize);
        prev = pair.first;
        range = pair.second;
    }

    if (range.size() - size > size / pageSizeWasteFactor) {
        std::pair<LargeRange, LargeRange> pair = range.split(size);
        range = pair.first;
        next = pair.second;
    }

    if (range.startPhysicalSize() < range.size()) {
        m_scavenger->scheduleIfUnderMemoryPressure(range.size());
        m_footprint += range.size() - range.totalPhysicalSize();
        vmAllocatePhysicalPagesSloppy(range.begin() + range.startPhysicalSize(), range.size() - range.startPhysicalSize());
        range.setStartPhysicalSize(range.size());
        range.setTotalPhysicalSize(range.size());
#if ENABLE_PHYSICAL_PAGE_MAP
        m_physicalPageMap.commit(range.begin(), range.size());
#endif
    }

    if (prev) {
        m_freeableMemory += prev.totalPhysicalSize();
        m_largeFree.add(prev);
    }

    if (next) {
        m_freeableMemory += next.totalPhysicalSize();
        m_largeFree.add(next);
    }

    m_objectTypes.set(Chunk::get(range.begin()), ObjectType::Large);

    m_largeAllocated.set(range.begin(), range.size());
    return range;
}

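// If no free range satisfies the request while decommits are still pending, wait
// for the scavenger to finish (ranges become eligible again in
// markAllLargeAsEligibile()) and retry. Otherwise grow the heap through VMHeap,
// except for Gigacage-backed heaps, whose reservation cannot grow.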
void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    BASSERT(isPowerOfTwo(alignment));

    m_scavenger->didStartGrowing();

    size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
    if (roundedSize < size) // Check for overflow
        return nullptr;
    size = roundedSize;

    size_t roundedAlignment = roundUpToMultipleOf<largeAlignment>(alignment);
    if (roundedAlignment < alignment) // Check for overflow
        return nullptr;
    alignment = roundedAlignment;

    LargeRange range = m_largeFree.remove(alignment, size);
    if (!range) {
        if (m_hasPendingDecommits) {
            m_condition.wait(lock, [&]() { return !m_hasPendingDecommits; });
            // Now we're guaranteed we're looking at all available memory.
            return tryAllocateLarge(lock, alignment, size);
        }

        if (usingGigacage())
            return nullptr;

        range = VMHeap::get()->tryAllocateLargeChunk(alignment, size);
        if (!range)
            return nullptr;

        m_largeFree.add(range);
        range = m_largeFree.remove(alignment, size);
    }

    m_freeableMemory -= range.totalPhysicalSize();

    void* result = splitAndAllocate(lock, range, alignment, size).begin();
    return result;
}

void* Heap::allocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
{
    void* result = tryAllocateLarge(lock, alignment, size);
    RELEASE_BASSERT(result);
    return result;
}

bool Heap::isLarge(std::unique_lock<Mutex>&, void* object)
{
    return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large;
}

size_t Heap::largeSize(std::unique_lock<Mutex>&, void* object)
{
    return m_largeAllocated.get(object);
}

void Heap::shrinkLarge(std::unique_lock<Mutex>& lock, const Range& object, size_t newSize)
{
    BASSERT(object.size() > newSize);

    size_t size = m_largeAllocated.remove(object.begin());
    LargeRange range = LargeRange(object, size, size);
    splitAndAllocate(lock, range, alignment, newSize);

    m_scavenger->schedule(size);
}

void Heap::deallocateLarge(std::unique_lock<Mutex>&, void* object)
{
    size_t size = m_largeAllocated.remove(object);
    m_largeFree.add(LargeRange(object, size, size, size));
    m_freeableMemory += size;
    m_scavenger->schedule(size);
}

void Heap::externalCommit(void* ptr, size_t size)
{
    std::unique_lock<Mutex> lock(Heap::mutex());
    externalCommit(lock, ptr, size);
}

void Heap::externalCommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
{
    BUNUSED_PARAM(ptr);

    m_footprint += size;
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.commit(ptr, size);
#endif
}

void Heap::externalDecommit(void* ptr, size_t size)
{
    std::unique_lock<Mutex> lock(Heap::mutex());
    externalDecommit(lock, ptr, size);
}

void Heap::externalDecommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
{
    BUNUSED_PARAM(ptr);

    m_footprint -= size;
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.decommit(ptr, size);
#endif
}

} // namespace bmalloc