/*
 * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"

#include "AvailableMemory.h"
#include "BulkDecommit.h"
#include "BumpAllocator.h"
#include "Chunk.h"
#include "CryptoRandom.h"
#include "DebugHeap.h"
#include "Environment.h"
#include "Gigacage.h"
#include "PerProcess.h"
#include "Scavenger.h"
#include "SmallLine.h"
#include "SmallPage.h"
#include "VMHeap.h"
#include "bmalloc.h"
#include <thread>
#include <vector>

namespace bmalloc {

Heap::Heap(HeapKind kind, std::lock_guard<Mutex>&)
    : m_kind(kind)
    , m_vmPageSizePhysical(vmPageSizePhysical())
{
    RELEASE_BASSERT(vmPageSizePhysical() >= smallPageSize);
    RELEASE_BASSERT(vmPageSize() >= vmPageSizePhysical());

    initializeLineMetadata();
    initializePageMetadata();

    BASSERT(!Environment::get()->isDebugHeapEnabled());

    Gigacage::ensureGigacage();
#if GIGACAGE_ENABLED
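    // When this heap lives inside a Gigacage, seed the large free list with a
    // randomly slid, page-aligned slice of the cage: the usable size is reduced
    // by a random amount (up to maximumCageSizeReductionForSlide) and the start
    // is offset by a random amount within the remaining slack.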
    if (usingGigacage()) {
        RELEASE_BASSERT(gigacageBasePtr());
        uint64_t random[2];
        cryptoRandom(reinterpret_cast<unsigned char*>(random), sizeof(random));
        size_t size = roundDownToMultipleOf(vmPageSize(), gigacageSize() - (random[0] % Gigacage::maximumCageSizeReductionForSlide));
        ptrdiff_t offset = roundDownToMultipleOf(vmPageSize(), random[1] % (gigacageSize() - size));
        void* base = reinterpret_cast<unsigned char*>(gigacageBasePtr()) + offset;
        m_largeFree.add(LargeRange(base, size, 0, 0));
    }
#endif

    m_scavenger = Scavenger::get();
}

bool Heap::usingGigacage()
{
    return isGigacage(m_kind) && gigacageBasePtr();
}

void* Heap::gigacageBasePtr()
{
    return Gigacage::basePtr(gigacageKind(m_kind));
}

size_t Heap::gigacageSize()
{
    return Gigacage::size(gigacageKind(m_kind));
}

void Heap::initializeLineMetadata()
{
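    // For each small size class, precompute one LineMetadata entry per line of a
    // physical page: startOffset is how many bytes at the head of the line belong
    // to an object that began on an earlier line, and objectCount is how many
    // objects begin on this line. For example, assuming bmalloc's 256-byte small
    // lines, a 48-byte size class yields { 0, 6 } for line 0 and { 32, 5 } for
    // line 1, because the object starting at byte 240 spills 32 bytes into line 1.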
    size_t sizeClassCount = bmalloc::sizeClass(smallLineSize);
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    m_smallLineMetadata.grow(sizeClassCount * smallLineCount);

    for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
        size_t size = objectSize(sizeClass);
        LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

        size_t object = 0;
        size_t line = 0;
        while (object < m_vmPageSizePhysical) {
            line = object / smallLineSize;
            size_t leftover = object % smallLineSize;

            size_t objectCount;
            size_t remainder;
            divideRoundingUp(smallLineSize - leftover, size, objectCount, remainder);

            pageMetadata[line] = { static_cast<unsigned char>(leftover), static_cast<unsigned char>(objectCount) };

            object += objectCount * size;
        }

        // Don't allow the last object in a page to escape the page.
        if (object > m_vmPageSizePhysical) {
            BASSERT(pageMetadata[line].objectCount);
            --pageMetadata[line].objectCount;
        }
    }
}

void Heap::initializePageMetadata()
{
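    // For each size class, pick the smallest multiple of the physical page size
    // whose tail waste (pageSize % objectSize) is at most pageSize / pageSizeWasteFactor,
    // capped at pageSizeMax, and record it as a page class. Size classes small
    // enough to fit within a single line always use one physical page.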
    auto computePageSize = [&](size_t sizeClass) {
        size_t size = objectSize(sizeClass);
        if (sizeClass < bmalloc::sizeClass(smallLineSize))
            return m_vmPageSizePhysical;

        for (size_t pageSize = m_vmPageSizePhysical;
            pageSize < pageSizeMax;
            pageSize += m_vmPageSizePhysical) {
            RELEASE_BASSERT(pageSize <= chunkSize / 2);
            size_t waste = pageSize % size;
            if (waste <= pageSize / pageSizeWasteFactor)
                return pageSize;
        }

        return pageSizeMax;
    };

    for (size_t i = 0; i < sizeClassCount; ++i)
        m_pageClasses[i] = (computePageSize(i) - 1) / smallPageSize;
}

size_t Heap::freeableMemory(std::lock_guard<Mutex>&)
{
    return m_freeableMemory;
}

size_t Heap::footprint()
{
    return m_footprint;
}

void Heap::markAllLargeAsEligibile(std::lock_guard<Mutex>&)
{
    m_largeFree.markAllAsEligibile();
    m_hasPendingDecommits = false;
    m_condition.notify_all();
}

void Heap::decommitLargeRange(std::lock_guard<Mutex>&, LargeRange& range, BulkDecommit& decommitter)
{
    m_footprint -= range.totalPhysicalSize();
    m_freeableMemory -= range.totalPhysicalSize();
    decommitter.addLazy(range.begin(), range.size());
    m_hasPendingDecommits = true;
    range.setStartPhysicalSize(0);
    range.setTotalPhysicalSize(0);
    BASSERT(range.isEligibile());
    range.setEligible(false);
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.decommit(range.begin(), range.size());
#endif
}

void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
{
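    // Full scavenge: decommit the physical memory behind every free small page,
    // return all cached chunks to the large free list, and decommit every free
    // large range, updating footprint and freeable-memory accounting as we go.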
    for (auto& list : m_freePages) {
        for (auto* chunk : list) {
            for (auto* page : chunk->freePages()) {
                if (!page->hasPhysicalPages())
                    continue;

                size_t pageSize = bmalloc::pageSize(&list - &m_freePages[0]);
                size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
                m_freeableMemory -= decommitSize;
                m_footprint -= decommitSize;
                decommitter.addEager(page->begin()->begin(), pageSize);
                page->setHasPhysicalPages(false);
#if ENABLE_PHYSICAL_PAGE_MAP
                m_physicalPageMap.decommit(page->begin()->begin(), pageSize);
#endif
            }
        }
    }

    for (auto& list : m_chunkCache) {
        while (!list.isEmpty())
            deallocateSmallChunk(list.pop(), &list - &m_chunkCache[0]);
    }

    for (LargeRange& range : m_largeFree) {
        m_highWatermark = std::min(m_highWatermark, static_cast<void*>(range.begin()));
        decommitLargeRange(lock, range, decommitter);
    }

    m_freeableMemory = 0;
}

void Heap::scavengeToHighWatermark(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
{
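    // Partial scavenge: decommit only the large free ranges above the current
    // high watermark; ranges at or below it are likely to be reused soon, so
    // they stay committed, and m_highWatermark is then recomputed.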
    void* newHighWaterMark = nullptr;
    for (LargeRange& range : m_largeFree) {
        if (range.begin() <= m_highWatermark)
            newHighWaterMark = std::min(newHighWaterMark, static_cast<void*>(range.begin()));
        else
            decommitLargeRange(lock, range, decommitter);
    }
    m_highWatermark = newHighWaterMark;
}

void Heap::deallocateLineCache(std::unique_lock<Mutex>&, LineCache& lineCache)
{
    for (auto& list : lineCache) {
        while (!list.isEmpty()) {
            size_t sizeClass = &list - &lineCache[0];
            m_lineCache[sizeClass].push(list.popFront());
        }
    }
}

void Heap::allocateSmallChunk(std::unique_lock<Mutex>& lock, size_t pageClass)
{
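    // Reuse a chunk from the per-page-class cache if possible; otherwise carve a
    // new chunk out of a large allocation, mark it Small, and push all of its
    // pages onto the chunk's free-page list before publishing it in m_freePages.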
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    size_t pageSize = bmalloc::pageSize(pageClass);

    Chunk* chunk = [&]() {
        if (!m_chunkCache[pageClass].isEmpty())
            return m_chunkCache[pageClass].pop();

        void* memory = allocateLarge(lock, chunkSize, chunkSize);

        Chunk* chunk = new (memory) Chunk(pageSize);

        m_objectTypes.set(chunk, ObjectType::Small);

        forEachPage(chunk, pageSize, [&](SmallPage* page) {
            page->setHasPhysicalPages(true);
            page->setHasFreeLines(lock, true);
            chunk->freePages().push(page);
        });

        m_freeableMemory += chunkSize;

        m_scavenger->schedule(0);

        return chunk;
    }();

    m_freePages[pageClass].push(chunk);
}

void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
{
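    // The chunk has no live small objects left. Re-type it as Large and return
    // its memory to the large free list, reconstructing how much of it is still
    // physically committed so freeable-memory and physical-size accounting stay
    // correct.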
    m_objectTypes.set(chunk, ObjectType::Large);

    size_t size = m_largeAllocated.remove(chunk);
    size_t totalPhysicalSize = size;

    size_t accountedInFreeable = 0;

    bool hasPhysicalPages = true;
    forEachPage(chunk, pageSize(pageClass), [&](SmallPage* page) {
        size_t physicalSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize(pageClass));
        if (!page->hasPhysicalPages()) {
            totalPhysicalSize -= physicalSize;
            hasPhysicalPages = false;
        } else
            accountedInFreeable += physicalSize;
    });

    m_freeableMemory -= accountedInFreeable;
    m_freeableMemory += totalPhysicalSize;

    size_t startPhysicalSize = hasPhysicalPages ? size : 0;
    m_largeFree.add(LargeRange(chunk, size, startPhysicalSize, totalPhysicalSize));
}

SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeClass, LineCache& lineCache)
{
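    // Fast paths: reuse a page from the caller's line cache or the heap's shared
    // line cache. Otherwise take a page from a free chunk (allocating a chunk if
    // necessary) and recommit its physical memory if it was previously scavenged.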
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    if (!lineCache[sizeClass].isEmpty())
        return lineCache[sizeClass].popFront();

    if (!m_lineCache[sizeClass].isEmpty())
        return m_lineCache[sizeClass].popFront();

    m_scavenger->didStartGrowing();

    SmallPage* page = [&]() {
        size_t pageClass = m_pageClasses[sizeClass];

        if (m_freePages[pageClass].isEmpty())
            allocateSmallChunk(lock, pageClass);

        Chunk* chunk = m_freePages[pageClass].tail();

        chunk->ref();

        SmallPage* page = chunk->freePages().pop();
        if (chunk->freePages().isEmpty())
            m_freePages[pageClass].remove(chunk);

        size_t pageSize = bmalloc::pageSize(pageClass);
        size_t physicalSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
        if (page->hasPhysicalPages())
            m_freeableMemory -= physicalSize;
        else {
            m_scavenger->scheduleIfUnderMemoryPressure(pageSize);
            m_footprint += physicalSize;
            vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize);
            page->setHasPhysicalPages(true);
#if ENABLE_PHYSICAL_PAGE_MAP
            m_physicalPageMap.commit(page->begin()->begin(), pageSize);
#endif
        }

        return page;
    }();

    page->setSizeClass(sizeClass);
    return page;
}

void Heap::deallocateSmallLine(std::unique_lock<Mutex>& lock, Object object, LineCache& lineCache)
{
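    // An object on this line has been freed. Drop the page's reference; if the
    // page still has live objects, just remember that it has free lines. A fully
    // free page goes back to its chunk, and a fully free chunk is moved into the
    // chunk cache, evicting (and freeing) a previously cached chunk if present.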
    BASSERT(!object.line()->refCount(lock));
    SmallPage* page = object.page();
    page->deref(lock);

    if (!page->hasFreeLines(lock)) {
        page->setHasFreeLines(lock, true);
        lineCache[page->sizeClass()].push(page);
    }

    if (page->refCount(lock))
        return;

    size_t sizeClass = page->sizeClass();
    size_t pageClass = m_pageClasses[sizeClass];

    m_freeableMemory += physicalPageSizeSloppy(page->begin()->begin(), pageSize(pageClass));

    List<SmallPage>::remove(page); // 'page' may be in any thread's line cache.

    Chunk* chunk = Chunk::get(page);
    if (chunk->freePages().isEmpty())
        m_freePages[pageClass].push(chunk);
    chunk->freePages().push(page);

    chunk->deref();

    if (!chunk->refCount()) {
        m_freePages[pageClass].remove(chunk);

        if (!m_chunkCache[pageClass].isEmpty())
            deallocateSmallChunk(m_chunkCache[pageClass].pop(), pageClass);

        m_chunkCache[pageClass].push(chunk);
    }

    m_scavenger->schedule(pageSize(pageClass));
}

void Heap::allocateSmallBumpRangesByMetadata(
    std::unique_lock<Mutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
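    // Harvest bump-allocation ranges for this size class by walking the page's
    // lines with the precomputed LineMetadata table: consecutive unreferenced
    // lines are coalesced into a single BumpRange, which either refills the
    // allocator or is stashed in the range cache for later.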
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    SmallLine* lines = page->begin();
    BASSERT(page->hasFreeLines(lock));
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

    auto findSmallBumpRange = [&](size_t& lineNumber) {
        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (!lines[lineNumber].refCount(lock)) {
                if (pageMetadata[lineNumber].objectCount)
                    return true;
            }
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](size_t& lineNumber) -> BumpRange {
        char* begin = lines[lineNumber].begin() + pageMetadata[lineNumber].startOffset;
        unsigned short objectCount = 0;

        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (lines[lineNumber].refCount(lock))
                break;

            if (!pageMetadata[lineNumber].objectCount)
                continue;

            objectCount += pageMetadata[lineNumber].objectCount;
            lines[lineNumber].ref(lock, pageMetadata[lineNumber].objectCount);
            page->ref(lock);
        }
        return { begin, objectCount };
    };

    size_t lineNumber = 0;
    for (;;) {
        if (!findSmallBumpRange(lineNumber)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(lineNumber);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

void Heap::allocateSmallBumpRangesByObject(
    std::unique_lock<Mutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
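    // Variant for size classes whose objects are too large for per-line metadata
    // to describe: walk the page object by object and coalesce runs of free
    // objects into bump ranges, referencing each object's line and page as we go.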
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    size_t size = allocator.size();
    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    BASSERT(page->hasFreeLines(lock));

    auto findSmallBumpRange = [&](Object& it, Object& end) {
        for ( ; it + size <= end; it = it + size) {
            if (!it.line()->refCount(lock))
                return true;
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](Object& it, Object& end) -> BumpRange {
        char* begin = it.address();
        unsigned short objectCount = 0;
        for ( ; it + size <= end; it = it + size) {
            if (it.line()->refCount(lock))
                break;

            ++objectCount;
            it.line()->ref(lock);
            it.page()->ref(lock);
        }
        return { begin, objectCount };
    };

    Object it(page->begin()->begin());
    Object end(it + pageSize(m_pageClasses[sizeClass]));
    for (;;) {
        if (!findSmallBumpRange(it, end)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(it, end);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

LargeRange Heap::splitAndAllocate(std::unique_lock<Mutex>&, LargeRange& range, size_t alignment, size_t size)
{
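    // Trim the range to the requested alignment and size: split off a misaligned
    // prefix and an oversized suffix and return both to the free list, commit any
    // uncommitted part of what remains, then record it as a live large allocation.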
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    LargeRange prev;
    LargeRange next;

    size_t alignmentMask = alignment - 1;
    if (test(range.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, range.begin()) - range.begin();
        std::pair<LargeRange, LargeRange> pair = range.split(prefixSize);
        prev = pair.first;
        range = pair.second;
    }

    if (range.size() - size > size / pageSizeWasteFactor) {
        std::pair<LargeRange, LargeRange> pair = range.split(size);
        range = pair.first;
        next = pair.second;
    }

    if (range.startPhysicalSize() < range.size()) {
        m_scavenger->scheduleIfUnderMemoryPressure(range.size());
        m_footprint += range.size() - range.totalPhysicalSize();
        vmAllocatePhysicalPagesSloppy(range.begin() + range.startPhysicalSize(), range.size() - range.startPhysicalSize());
        range.setStartPhysicalSize(range.size());
        range.setTotalPhysicalSize(range.size());
#if ENABLE_PHYSICAL_PAGE_MAP
        m_physicalPageMap.commit(range.begin(), range.size());
#endif
    }

    if (prev) {
        m_freeableMemory += prev.totalPhysicalSize();
        m_largeFree.add(prev);
    }

    if (next) {
        m_freeableMemory += next.totalPhysicalSize();
        m_largeFree.add(next);
    }

    m_objectTypes.set(Chunk::get(range.begin()), ObjectType::Large);

    m_largeAllocated.set(range.begin(), range.size());
    return range;
}

void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
{
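    // Round the request up to large-allocation granularity (checking for
    // overflow), then try to satisfy it from the large free list. If a scavenge
    // still has pending decommits, wait for it to finish and retry so we see all
    // available memory; otherwise fall back to the VMHeap for fresh virtual
    // memory, unless this heap is confined to a Gigacage fixed at construction.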
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    BASSERT(isPowerOfTwo(alignment));

    m_scavenger->didStartGrowing();

    size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
    if (roundedSize < size) // Check for overflow
        return nullptr;
    size = roundedSize;

    size_t roundedAlignment = roundUpToMultipleOf<largeAlignment>(alignment);
    if (roundedAlignment < alignment) // Check for overflow
        return nullptr;
    alignment = roundedAlignment;

    LargeRange range = m_largeFree.remove(alignment, size);
    if (!range) {
        if (m_hasPendingDecommits) {
            m_condition.wait(lock, [&]() { return !m_hasPendingDecommits; });
            // Now we're guaranteed we're looking at all available memory.
            return tryAllocateLarge(lock, alignment, size);
        }

        if (usingGigacage())
            return nullptr;

        range = VMHeap::get()->tryAllocateLargeChunk(alignment, size);
        if (!range)
            return nullptr;

        m_largeFree.add(range);
        range = m_largeFree.remove(alignment, size);
    }

    m_freeableMemory -= range.totalPhysicalSize();

    void* result = splitAndAllocate(lock, range, alignment, size).begin();
    m_highWatermark = std::max(m_highWatermark, result);
    return result;
}

void* Heap::allocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size)
{
    void* result = tryAllocateLarge(lock, alignment, size);
    RELEASE_BASSERT(result);
    return result;
}

bool Heap::isLarge(std::unique_lock<Mutex>&, void* object)
{
    return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large;
}

size_t Heap::largeSize(std::unique_lock<Mutex>&, void* object)
{
    return m_largeAllocated.get(object);
}

void Heap::shrinkLarge(std::unique_lock<Mutex>& lock, const Range& object, size_t newSize)
{
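    // Shrink a live large allocation in place: rebuild its LargeRange, re-split
    // it at newSize (splitAndAllocate returns the trimmed tail to the free list),
    // and schedule a scavenge. 'alignment' here resolves to bmalloc's minimum
    // object alignment constant from Sizes.h.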
    BASSERT(object.size() > newSize);

    size_t size = m_largeAllocated.remove(object.begin());
    LargeRange range = LargeRange(object, size, size);
    splitAndAllocate(lock, range, alignment, newSize);

    m_scavenger->schedule(size);
}

void Heap::deallocateLarge(std::unique_lock<Mutex>&, void* object)
{
    size_t size = m_largeAllocated.remove(object);
    m_largeFree.add(LargeRange(object, size, size, size));
    m_freeableMemory += size;
    m_scavenger->schedule(size);
}

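// externalCommit/externalDecommit keep m_footprint and the physical page map in
// sync when memory owned by this heap is committed or decommitted by code
// outside Heap's own allocation and scavenging paths.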
void Heap::externalCommit(void* ptr, size_t size)
{
    std::unique_lock<Mutex> lock(Heap::mutex());
    externalCommit(lock, ptr, size);
}

void Heap::externalCommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
{
    BUNUSED_PARAM(ptr);

    m_footprint += size;
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.commit(ptr, size);
#endif
}

void Heap::externalDecommit(void* ptr, size_t size)
{
    std::unique_lock<Mutex> lock(Heap::mutex());
    externalDecommit(lock, ptr, size);
}

void Heap::externalDecommit(std::unique_lock<Mutex>&, void* ptr, size_t size)
{
    BUNUSED_PARAM(ptr);

    m_footprint -= size;
#if ENABLE_PHYSICAL_PAGE_MAP
    m_physicalPageMap.decommit(ptr, size);
#endif
}

} // namespace bmalloc