/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"

#include "AvailableMemory.h"
#include "BulkDecommit.h"
#include "BumpAllocator.h"
#include "Chunk.h"
#include "CryptoRandom.h"
#include "Environment.h"
#include "Gigacage.h"
#include "DebugHeap.h"
#include "PerProcess.h"
#include "Scavenger.h"
#include "SmallLine.h"
#include "SmallPage.h"
#include "VMHeap.h"
#include "bmalloc.h"
#include <thread>
#include <vector>

namespace bmalloc {

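// When this heap manages a Gigacage, the usable region is "slid": a cryptographically
// random amount is trimmed from the cage size, and the base is advanced by a random,
// page-aligned offset before the resulting range is seeded into m_largeFree. This
// presumably makes addresses inside the cage harder to predict.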
Heap::Heap(HeapKind kind, std::lock_guard<Mutex>&)
    : m_kind(kind)
    , m_vmPageSizePhysical(vmPageSizePhysical())
    , m_debugHeap(nullptr)
{
    RELEASE_BASSERT(vmPageSizePhysical() >= smallPageSize);
    RELEASE_BASSERT(vmPageSize() >= vmPageSizePhysical());

    initializeLineMetadata();
    initializePageMetadata();

    if (PerProcess<Environment>::get()->isDebugHeapEnabled())
        m_debugHeap = PerProcess<DebugHeap>::get();
    else {
        Gigacage::ensureGigacage();
#if GIGACAGE_ENABLED
        if (usingGigacage()) {
            RELEASE_BASSERT(gigacageBasePtr());
            uint64_t random[2];
            cryptoRandom(reinterpret_cast<unsigned char*>(random), sizeof(random));
            size_t size = roundDownToMultipleOf(vmPageSize(), gigacageSize() - (random[0] % Gigacage::maximumCageSizeReductionForSlide));
            ptrdiff_t offset = roundDownToMultipleOf(vmPageSize(), random[1] % (gigacageSize() - size));
            void* base = reinterpret_cast<unsigned char*>(gigacageBasePtr()) + offset;
            m_largeFree.add(LargeRange(base, size, 0, 0));
        }
#endif
    }

    m_scavenger = PerProcess<Scavenger>::get();
}

bool Heap::usingGigacage()
{
    return isGigacage(m_kind) && gigacageBasePtr();
}

void* Heap::gigacageBasePtr()
{
    return Gigacage::basePtr(gigacageKind(m_kind));
}

size_t Heap::gigacageSize()
{
    return Gigacage::size(gigacageKind(m_kind));
}

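// Precomputes, for each small size class, per-line metadata describing how objects of
// that class tile a physical page: for every line, the offset at which the first
// object begins and how many objects begin in that line. The bump allocators below
// consume this table. For example, assuming the usual 256-byte small lines and a
// 48-byte object size, line 0 gets {0, 6}; the sixth object spills 32 bytes into
// line 1, so line 1 gets {32, 5}.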
void Heap::initializeLineMetadata()
{
    size_t sizeClassCount = bmalloc::sizeClass(smallLineSize);
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    m_smallLineMetadata.grow(sizeClassCount * smallLineCount);

    for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
        size_t size = objectSize(sizeClass);
        LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

        size_t object = 0;
        size_t line = 0;
        while (object < m_vmPageSizePhysical) {
            line = object / smallLineSize;
            size_t leftover = object % smallLineSize;

            size_t objectCount;
            size_t remainder;
            divideRoundingUp(smallLineSize - leftover, size, objectCount, remainder);

            pageMetadata[line] = { static_cast<unsigned char>(leftover), static_cast<unsigned char>(objectCount) };

            object += objectCount * size;
        }

        // Don't allow the last object in a page to escape the page.
        if (object > m_vmPageSizePhysical) {
            BASSERT(pageMetadata[line].objectCount);
            --pageMetadata[line].objectCount;
        }
    }
}

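// For each size class, chooses the smallest page size, in multiples of the physical VM
// page size and capped at pageSizeMax, whose tail waste is no more than
// 1/pageSizeWasteFactor of the page, and records the result as that class's page class.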
void Heap::initializePageMetadata()
{
    auto computePageSize = [&](size_t sizeClass) {
        size_t size = objectSize(sizeClass);
        if (sizeClass < bmalloc::sizeClass(smallLineSize))
            return m_vmPageSizePhysical;

        for (size_t pageSize = m_vmPageSizePhysical;
            pageSize < pageSizeMax;
            pageSize += m_vmPageSizePhysical) {
            RELEASE_BASSERT(pageSize <= chunkSize / 2);
            size_t waste = pageSize % size;
            if (waste <= pageSize / pageSizeWasteFactor)
                return pageSize;
        }

        return pageSizeMax;
    };

    for (size_t i = 0; i < sizeClassCount; ++i)
        m_pageClasses[i] = (computePageSize(i) - 1) / smallPageSize;
}

size_t Heap::freeableMemory(std::lock_guard<Mutex>&)
{
    return m_freeableMemory;
}

size_t Heap::footprint()
{
    BASSERT(!m_debugHeap);
    return m_footprint;
}

void Heap::markAllLargeAsEligibile(std::lock_guard<Mutex>&)
{
    m_largeFree.markAllAsEligibile();
    m_hasPendingDecommits = false;
    m_condition.notify_all();
}

void Heap::decommitLargeRange(std::lock_guard<Mutex>&, LargeRange& range, BulkDecommit& decommitter)
{
    m_footprint -= range.totalPhysicalSize();
    m_freeableMemory -= range.totalPhysicalSize();
    decommitter.addLazy(range.begin(), range.size());
    m_hasPendingDecommits = true;
    range.setStartPhysicalSize(0);
    range.setTotalPhysicalSize(0);
    BASSERT(range.isEligibile());
    range.setEligible(false);
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.decommit(range.begin(), range.size());
#endif
}

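// Full scavenge: hands every freeable physical page to the BulkDecommit. This covers
// free small pages in partially used chunks, the per-page-class chunk cache, and all
// free large ranges. Decommitted large ranges become ineligible and
// m_hasPendingDecommits is set, so large allocation stalls until
// markAllLargeAsEligibile() runs once the lazy decommits have actually been issued.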
void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
{
    for (auto& list : m_freePages) {
        for (auto* chunk : list) {
            for (auto* page : chunk->freePages()) {
                if (!page->hasPhysicalPages())
                    continue;

                size_t pageSize = bmalloc::pageSize(&list - &m_freePages[0]);
                size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
                m_freeableMemory -= decommitSize;
                m_footprint -= decommitSize;
                decommitter.addEager(page->begin()->begin(), pageSize);
                page->setHasPhysicalPages(false);
#if ENABLE_PHYSICAL_PAGE_MAP
                m_physicalPageMap.decommit(page->begin()->begin(), pageSize);
#endif
            }
        }
    }

    for (auto& list : m_chunkCache) {
        while (!list.isEmpty())
            deallocateSmallChunk(list.pop(), &list - &m_chunkCache[0]);
    }

    for (LargeRange& range : m_largeFree) {
        m_highWatermark = std::min(m_highWatermark, static_cast<void*>(range.begin()));
        decommitLargeRange(lock, range, decommitter);
    }

    m_freeableMemory = 0;
}

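// Partial scavenge: decommits only the large free ranges that lie above m_highWatermark,
// leaving memory that is still being recycled committed, and then updates the watermark
// from the surviving ranges.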
void Heap::scavengeToHighWatermark(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
{
    void* newHighWaterMark = nullptr;
    for (LargeRange& range : m_largeFree) {
        if (range.begin() <= m_highWatermark)
            newHighWaterMark = std::min(newHighWaterMark, static_cast<void*>(range.begin()));
        else
            decommitLargeRange(lock, range, decommitter);
    }
    m_highWatermark = newHighWaterMark;
}

void Heap::deallocateLineCache(std::unique_lock<Mutex>&, LineCache& lineCache)
{
    for (auto& list : lineCache) {
        while (!list.isEmpty()) {
            size_t sizeClass = &list - &lineCache[0];
            m_lineCache[sizeClass].push(list.popFront());
        }
    }
}

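// Provides a chunk for the given page class, reusing one from the chunk cache when
// possible. A brand-new chunk is carved out of a large allocation, registered as
// ObjectType::Small, and seeded with its full complement of free pages before being
// published on m_freePages.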
void Heap::allocateSmallChunk(std::unique_lock<Mutex>& lock, size_t pageClass)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    size_t pageSize = bmalloc::pageSize(pageClass);

    Chunk* chunk = [&]() {
        if (!m_chunkCache[pageClass].isEmpty())
            return m_chunkCache[pageClass].pop();

        void* memory = allocateLarge(lock, chunkSize, chunkSize);

        Chunk* chunk = new (memory) Chunk(pageSize);

        m_objectTypes.set(chunk, ObjectType::Small);

        forEachPage(chunk, pageSize, [&](SmallPage* page) {
            page->setHasPhysicalPages(true);
            page->setHasFreeLines(lock, true);
            chunk->freePages().push(page);
        });

        m_freeableMemory += chunkSize;

        m_scavenger->schedule(0);

        return chunk;
    }();

    m_freePages[pageClass].push(chunk);
}

void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
{
    m_objectTypes.set(chunk, ObjectType::Large);

    size_t size = m_largeAllocated.remove(chunk);
    size_t totalPhysicalSize = size;

    size_t accountedInFreeable = 0;

    bool hasPhysicalPages = true;
    forEachPage(chunk, pageSize(pageClass), [&](SmallPage* page) {
        size_t physicalSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize(pageClass));
        if (!page->hasPhysicalPages()) {
            totalPhysicalSize -= physicalSize;
            hasPhysicalPages = false;
        } else
            accountedInFreeable += physicalSize;
    });

    m_freeableMemory -= accountedInFreeable;
    m_freeableMemory += totalPhysicalSize;

    size_t startPhysicalSize = hasPhysicalPages ? size : 0;
    m_largeFree.add(LargeRange(chunk, size, startPhysicalSize, totalPhysicalSize));
}

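// Returns a page for the given size class, preferring the caller's line cache, then the
// heap-wide line cache, and finally a free page from a chunk of the matching page class,
// allocating a fresh chunk if none is available. Pages whose physical memory was
// decommitted by the scavenger are recommitted here and charged to the footprint.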
SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeClass, LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    if (!lineCache[sizeClass].isEmpty())
        return lineCache[sizeClass].popFront();

    if (!m_lineCache[sizeClass].isEmpty())
        return m_lineCache[sizeClass].popFront();

    m_scavenger->didStartGrowing();

    SmallPage* page = [&]() {
        size_t pageClass = m_pageClasses[sizeClass];

        if (m_freePages[pageClass].isEmpty())
            allocateSmallChunk(lock, pageClass);

        Chunk* chunk = m_freePages[pageClass].tail();

        chunk->ref();

        SmallPage* page = chunk->freePages().pop();
        if (chunk->freePages().isEmpty())
            m_freePages[pageClass].remove(chunk);

        size_t pageSize = bmalloc::pageSize(pageClass);
        size_t physicalSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
        if (page->hasPhysicalPages())
            m_freeableMemory -= physicalSize;
        else {
            m_scavenger->scheduleIfUnderMemoryPressure(pageSize);
            m_footprint += physicalSize;
            vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize);
            page->setHasPhysicalPages(true);
#if ENABLE_PHYSICAL_PAGE_MAP
            m_physicalPageMap.commit(page->begin()->begin(), pageSize);
#endif
        }

        return page;
    }();

    page->setSizeClass(sizeClass);
    return page;
}

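// Called when the last object in a small line is freed. The page is deref'd; if it
// becomes empty it rejoins its chunk's free-page list, and if the whole chunk becomes
// empty the chunk is parked in the per-page-class cache, fully deallocating whichever
// chunk was cached there before. A scavenge is scheduled in any case.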
void Heap::deallocateSmallLine(std::unique_lock<Mutex>& lock, Object object, LineCache& lineCache)
{
    BASSERT(!object.line()->refCount(lock));
    SmallPage* page = object.page();
    page->deref(lock);

    if (!page->hasFreeLines(lock)) {
        page->setHasFreeLines(lock, true);
        lineCache[page->sizeClass()].push(page);
    }

    if (page->refCount(lock))
        return;

    size_t sizeClass = page->sizeClass();
    size_t pageClass = m_pageClasses[sizeClass];

    m_freeableMemory += physicalPageSizeSloppy(page->begin()->begin(), pageSize(pageClass));

    List<SmallPage>::remove(page); // 'page' may be in any thread's line cache.

    Chunk* chunk = Chunk::get(page);
    if (chunk->freePages().isEmpty())
        m_freePages[pageClass].push(chunk);
    chunk->freePages().push(page);

    chunk->deref();

    if (!chunk->refCount()) {
        m_freePages[pageClass].remove(chunk);

        if (!m_chunkCache[pageClass].isEmpty())
            deallocateSmallChunk(m_chunkCache[pageClass].pop(), pageClass);

        m_chunkCache[pageClass].push(chunk);
    }

    m_scavenger->schedule(pageSize(pageClass));
}

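// Refills a bump allocator from a small page using the precomputed line metadata:
// runs of unreferenced lines are coalesced into bump ranges, each line is ref'd once
// per object handed out, and any extra ranges are parked in the caller's range cache.
// The metadata table only covers size classes smaller than a line; larger classes use
// allocateSmallBumpRangesByObject below.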
void Heap::allocateSmallBumpRangesByMetadata(
    std::unique_lock<Mutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    SmallLine* lines = page->begin();
    BASSERT(page->hasFreeLines(lock));
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

    auto findSmallBumpRange = [&](size_t& lineNumber) {
        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (!lines[lineNumber].refCount(lock)) {
                if (pageMetadata[lineNumber].objectCount)
                    return true;
            }
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](size_t& lineNumber) -> BumpRange {
        char* begin = lines[lineNumber].begin() + pageMetadata[lineNumber].startOffset;
        unsigned short objectCount = 0;

        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (lines[lineNumber].refCount(lock))
                break;

            if (!pageMetadata[lineNumber].objectCount)
                continue;

            objectCount += pageMetadata[lineNumber].objectCount;
            lines[lineNumber].ref(lock, pageMetadata[lineNumber].objectCount);
            page->ref(lock);
        }
        return { begin, objectCount };
    };

    size_t lineNumber = 0;
    for (;;) {
        if (!findSmallBumpRange(lineNumber)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(lineNumber);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

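// Variant for size classes of at least a line: there is no per-line metadata, so the
// page is walked object by object, each run of unreferenced lines becomes one bump
// range, and the line each object starts in is ref'd along with the page.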
void Heap::allocateSmallBumpRangesByObject(
    std::unique_lock<Mutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    size_t size = allocator.size();
    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    BASSERT(page->hasFreeLines(lock));

    auto findSmallBumpRange = [&](Object& it, Object& end) {
        for ( ; it + size <= end; it = it + size) {
            if (!it.line()->refCount(lock))
                return true;
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](Object& it, Object& end) -> BumpRange {
        char* begin = it.address();
        unsigned short objectCount = 0;
        for ( ; it + size <= end; it = it + size) {
            if (it.line()->refCount(lock))
                break;

            ++objectCount;
            it.line()->ref(lock);
            it.page()->ref(lock);
        }
        return { begin, objectCount };
    };

    Object it(page->begin()->begin());
    Object end(it + pageSize(m_pageClasses[sizeClass]));
    for (;;) {
        if (!findSmallBumpRange(it, end)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(it, end);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

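// Trims a free large range to the requested alignment and size: a misaligned prefix
// and an oversized suffix are split off and returned to m_largeFree, the surviving
// range is committed if it is not already fully backed by physical pages, its chunk is
// marked Large, and the allocation is recorded in m_largeAllocated.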
LargeRange Heap::splitAndAllocate(std::unique_lock<Mutex>&, LargeRange& range, size_t alignment, size_t size)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    LargeRange prev;
    LargeRange next;

    size_t alignmentMask = alignment - 1;
    if (test(range.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, range.begin()) - range.begin();
        std::pair<LargeRange, LargeRange> pair = range.split(prefixSize);
        prev = pair.first;
        range = pair.second;
    }

    if (range.size() - size > size / pageSizeWasteFactor) {
        std::pair<LargeRange, LargeRange> pair = range.split(size);
        range = pair.first;
        next = pair.second;
    }

    if (range.startPhysicalSize() < range.size()) {
        m_scavenger->scheduleIfUnderMemoryPressure(range.size());
        m_footprint += range.size() - range.totalPhysicalSize();
        vmAllocatePhysicalPagesSloppy(range.begin() + range.startPhysicalSize(), range.size() - range.startPhysicalSize());
        range.setStartPhysicalSize(range.size());
        range.setTotalPhysicalSize(range.size());
#if ENABLE_PHYSICAL_PAGE_MAP
        m_physicalPageMap.commit(range.begin(), range.size());
#endif
    }

    if (prev) {
        m_freeableMemory += prev.totalPhysicalSize();
        m_largeFree.add(prev);
    }

    if (next) {
        m_freeableMemory += next.totalPhysicalSize();
        m_largeFree.add(next);
    }

    m_objectTypes.set(Chunk::get(range.begin()), ObjectType::Large);

    m_largeAllocated.set(range.begin(), range.size());
    return range;
}

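// Large allocation path. The size and alignment are rounded up to largeAlignment (with
// overflow checks), then a fit is taken from m_largeFree. If nothing is eligible while
// decommits are still pending, the call blocks on m_condition until
// markAllLargeAsEligibile() runs and then retries; otherwise, outside a Gigacage, a
// fresh range is requested from the VMHeap. Returns null when no memory can be found.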
void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    BASSERT(isPowerOfTwo(alignment));

    if (m_debugHeap)
        return m_debugHeap->memalignLarge(alignment, size);

    m_scavenger->didStartGrowing();

    size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
    if (roundedSize < size) // Check for overflow
        return nullptr;
    size = roundedSize;

    size_t roundedAlignment = roundUpToMultipleOf<largeAlignment>(alignment);
    if (roundedAlignment < alignment) // Check for overflow
        return nullptr;
    alignment = roundedAlignment;

    LargeRange range = m_largeFree.remove(alignment, size);
    if (!range) {
        if (m_hasPendingDecommits) {
            m_condition.wait(lock, [&]() { return !m_hasPendingDecommits; });
            // Now we're guaranteed we're looking at all available memory.
            return tryAllocateLarge(lock, alignment, size);
        }

        if (usingGigacage())
            return nullptr;

        range = PerProcess<VMHeap>::get()->tryAllocateLargeChunk(alignment, size);
        if (!range)
            return nullptr;

        m_largeFree.add(range);
        range = m_largeFree.remove(alignment, size);
    }

    m_freeableMemory -= range.totalPhysicalSize();

    void* result = splitAndAllocate(lock, range, alignment, size).begin();
    m_highWatermark = std::max(m_highWatermark, result);
    return result;
}

void* Heap::allocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
{
    void* result = tryAllocateLarge(lock, alignment, size);
    RELEASE_BASSERT(result);
    return result;
}

bool Heap::isLarge(std::unique_lock<Mutex>&, void* object)
{
    return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large;
}

size_t Heap::largeSize(std::unique_lock<Mutex>&, void* object)
{
    return m_largeAllocated.get(object);
}

void Heap::shrinkLarge(std::unique_lock<Mutex>& lock, const Range& object, size_t newSize)
{
    BASSERT(object.size() > newSize);

    size_t size = m_largeAllocated.remove(object.begin());
    LargeRange range = LargeRange(object, size, size);
    splitAndAllocate(lock, range, alignment, newSize);

    m_scavenger->schedule(size);
}

void Heap::deallocateLarge(std::unique_lock<Mutex>&, void* object)
{
    if (m_debugHeap)
        return m_debugHeap->freeLarge(object);

    size_t size = m_largeAllocated.remove(object);
    m_largeFree.add(LargeRange(object, size, size, size));
    m_freeableMemory += size;
    m_scavenger->schedule(size);
}

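// externalCommit/externalDecommit keep m_footprint and the physical page map in sync
// when memory owned by this heap is committed or decommitted by code outside of it.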
void Heap::externalCommit(void* ptr, size_t size)
{
    std::unique_lock<Mutex> lock(Heap::mutex());
    externalCommit(lock, ptr, size);
}

void Heap::externalCommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
{
    BUNUSED_PARAM(ptr);

    m_footprint += size;
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.commit(ptr, size);
#endif
}

void Heap::externalDecommit(void* ptr, size_t size)
{
    std::unique_lock<Mutex> lock(Heap::mutex());
    externalDecommit(lock, ptr, size);
}

void Heap::externalDecommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
{
    BUNUSED_PARAM(ptr);

    m_footprint -= size;
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.decommit(ptr, size);
#endif
}

} // namespace bmalloc