/*
 * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"

#include "AvailableMemory.h"
#include "BumpAllocator.h"
#include "Chunk.h"
#include "Environment.h"
#include "DebugHeap.h"
#include "Gigacage.h"
#include "PerProcess.h"
#include "Scavenger.h"
#include "SmallLine.h"
#include "SmallPage.h"
#include "VMHeap.h"
#include "bmalloc.h"
#include <thread>

namespace bmalloc {

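// If the environment enables the debug heap, allocations are forwarded to it. Otherwise,
// when the gigacage is enabled for this heap kind, the cage's entire reserved (but not yet
// committed) range seeds the large free list, so everything this heap hands out is carved
// out of the cage.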
Heap::Heap(HeapKind kind, std::lock_guard<StaticMutex>&)
    : m_kind(kind)
    , m_vmPageSizePhysical(vmPageSizePhysical())
    , m_debugHeap(nullptr)
{
    RELEASE_BASSERT(vmPageSizePhysical() >= smallPageSize);
    RELEASE_BASSERT(vmPageSize() >= vmPageSizePhysical());

    initializeLineMetadata();
    initializePageMetadata();

    if (PerProcess<Environment>::get()->isDebugHeapEnabled())
        m_debugHeap = PerProcess<DebugHeap>::get();
    else {
        Gigacage::ensureGigacage();
#if GIGACAGE_ENABLED
        if (usingGigacage()) {
            RELEASE_BASSERT(gigacageBasePtr());
            m_largeFree.add(LargeRange(gigacageBasePtr(), gigacageSize(), 0));
        }
#endif
    }

    m_scavenger = PerProcess<Scavenger>::get();
}

bool Heap::usingGigacage()
{
    return isGigacage(m_kind) && gigacageBasePtr();
}

void* Heap::gigacageBasePtr()
{
    return Gigacage::basePtr(gigacageKind(m_kind));
}

size_t Heap::gigacageSize()
{
    return Gigacage::size(gigacageKind(m_kind));
}

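// Precomputes per-line metadata for each size class small enough to use it: the offset at
// which the first object begins in each line and how many objects begin there. The
// bump-allocation path reads this table instead of recomputing object boundaries on every
// refill.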
void Heap::initializeLineMetadata()
{
    size_t sizeClassCount = bmalloc::sizeClass(smallLineSize);
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    m_smallLineMetadata.grow(sizeClassCount * smallLineCount);

    for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
        size_t size = objectSize(sizeClass);
        LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

        size_t object = 0;
        size_t line = 0;
        while (object < m_vmPageSizePhysical) {
            line = object / smallLineSize;
            size_t leftover = object % smallLineSize;

            size_t objectCount;
            size_t remainder;
            divideRoundingUp(smallLineSize - leftover, size, objectCount, remainder);

            pageMetadata[line] = { static_cast<unsigned char>(leftover), static_cast<unsigned char>(objectCount) };

            object += objectCount * size;
        }

        // Don't allow the last object in a page to escape the page.
        if (object > m_vmPageSizePhysical) {
            BASSERT(pageMetadata[line].objectCount);
            --pageMetadata[line].objectCount;
        }
    }
}

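// Picks a page size for each size class: classes that fit within a single line use one
// physical page; larger classes grow the page in physical-page increments until internal
// waste drops below 1/pageSizeWasteFactor of the page, capped at pageSizeMax. The result
// is stored as a page class index.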
void Heap::initializePageMetadata()
{
    auto computePageSize = [&](size_t sizeClass) {
        size_t size = objectSize(sizeClass);
        if (sizeClass < bmalloc::sizeClass(smallLineSize))
            return m_vmPageSizePhysical;

        for (size_t pageSize = m_vmPageSizePhysical;
            pageSize < pageSizeMax;
            pageSize += m_vmPageSizePhysical) {
            RELEASE_BASSERT(pageSize <= chunkSize / 2);
            size_t waste = pageSize % size;
            if (waste <= pageSize / pageSizeWasteFactor)
                return pageSize;
        }

        return pageSizeMax;
    };

    for (size_t i = 0; i < sizeClassCount; ++i)
        m_pageClasses[i] = (computePageSize(i) - 1) / smallPageSize;
}

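// Gives memory back to the OS: decommits the physical pages behind free small pages,
// returns cached chunks to the large free list, and decommits free large ranges. The
// virtual address ranges stay reserved so they can be recommitted on reuse.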
void Heap::scavenge(std::lock_guard<StaticMutex>&)
{
    for (auto& list : m_freePages) {
        for (auto* chunk : list) {
            for (auto* page : chunk->freePages()) {
                if (!page->hasPhysicalPages())
                    continue;

                vmDeallocatePhysicalPagesSloppy(page->begin()->begin(), pageSize(&list - &m_freePages[0]));

                page->setHasPhysicalPages(false);
            }
        }
    }

    for (auto& list : m_chunkCache) {
        while (!list.isEmpty())
            deallocateSmallChunk(list.pop(), &list - &m_chunkCache[0]);
    }

    for (auto& range : m_largeFree) {
        vmDeallocatePhysicalPagesSloppy(range.begin(), range.size());

        range.setPhysicalSize(0);
    }
}

void Heap::deallocateLineCache(std::lock_guard<StaticMutex>&, LineCache& lineCache)
{
    for (auto& list : lineCache) {
        while (!list.isEmpty()) {
            size_t sizeClass = &list - &lineCache[0];
            m_lineCache[sizeClass].push(list.popFront());
        }
    }
}

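// Ensures m_freePages[pageClass] has a chunk to allocate from: reuses a cached chunk if
// possible, otherwise carves a fresh chunk out of the large heap, marks it Small, and
// threads every page onto the chunk's free-page list.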
void Heap::allocateSmallChunk(std::lock_guard<StaticMutex>& lock, size_t pageClass)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    size_t pageSize = bmalloc::pageSize(pageClass);

    Chunk* chunk = [&]() {
        if (!m_chunkCache[pageClass].isEmpty())
            return m_chunkCache[pageClass].pop();

        void* memory = allocateLarge(lock, chunkSize, chunkSize);

        Chunk* chunk = new (memory) Chunk(pageSize);

        m_objectTypes.set(chunk, ObjectType::Small);

        forEachPage(chunk, pageSize, [&](SmallPage* page) {
            page->setHasPhysicalPages(true);
            page->setHasFreeLines(lock, true);
            chunk->freePages().push(page);
        });

        m_scavenger->schedule(0);

        return chunk;
    }();

    m_freePages[pageClass].push(chunk);
}

void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
{
    m_objectTypes.set(chunk, ObjectType::Large);

    size_t size = m_largeAllocated.remove(chunk);

    bool hasPhysicalPages = true;
    forEachPage(chunk, pageSize(pageClass), [&](SmallPage* page) {
        if (!page->hasPhysicalPages())
            hasPhysicalPages = false;
    });
    size_t physicalSize = hasPhysicalPages ? size : 0;

    m_largeFree.add(LargeRange(chunk, size, physicalSize));
}

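// Finds a page for the given size class, checking the caller's line cache, then the heap's
// line cache, then the free pages of a chunk of the matching page class (allocating a new
// chunk if necessary). Physical memory is committed on demand for pages that were scavenged.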
SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass, LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    if (!lineCache[sizeClass].isEmpty())
        return lineCache[sizeClass].popFront();

    if (!m_lineCache[sizeClass].isEmpty())
        return m_lineCache[sizeClass].popFront();

    m_scavenger->didStartGrowing();

    SmallPage* page = [&]() {
        size_t pageClass = m_pageClasses[sizeClass];

        if (m_freePages[pageClass].isEmpty())
            allocateSmallChunk(lock, pageClass);

        Chunk* chunk = m_freePages[pageClass].tail();

        chunk->ref();

        SmallPage* page = chunk->freePages().pop();
        if (chunk->freePages().isEmpty())
            m_freePages[pageClass].remove(chunk);

        if (!page->hasPhysicalPages()) {
            m_scavenger->scheduleIfUnderMemoryPressure(pageSize(pageClass));

            vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize(pageClass));
            page->setHasPhysicalPages(true);
        }

        return page;
    }();

    page->setSizeClass(sizeClass);
    return page;
}

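// Called when a small line's last object is freed. The page regains a free line; a page
// with no remaining references moves back to its chunk's free-page list, and a chunk with
// no referenced pages is retired to the chunk cache, evicting any chunk already cached for
// this page class.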
void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, Object object, LineCache& lineCache)
{
    BASSERT(!object.line()->refCount(lock));
    SmallPage* page = object.page();
    page->deref(lock);

    if (!page->hasFreeLines(lock)) {
        page->setHasFreeLines(lock, true);
        lineCache[page->sizeClass()].push(page);
    }

    if (page->refCount(lock))
        return;

    size_t sizeClass = page->sizeClass();
    size_t pageClass = m_pageClasses[sizeClass];

    List<SmallPage>::remove(page); // 'page' may be in any thread's line cache.

    Chunk* chunk = Chunk::get(page);
    if (chunk->freePages().isEmpty())
        m_freePages[pageClass].push(chunk);
    chunk->freePages().push(page);

    chunk->deref();

    if (!chunk->refCount()) {
        m_freePages[pageClass].remove(chunk);

        if (!m_chunkCache[pageClass].isEmpty())
            deallocateSmallChunk(m_chunkCache[pageClass].pop(), pageClass);

        m_chunkCache[pageClass].push(chunk);
    }

    m_scavenger->schedule(pageSize(pageClass));
}

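// Refills the bump allocator using the precomputed line metadata: scans the page for runs
// of unreferenced lines and turns each run into a bump range. The first range refills the
// allocator directly; subsequent ranges are stashed in the range cache until it fills up.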
void Heap::allocateSmallBumpRangesByMetadata(
    std::lock_guard<StaticMutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    SmallLine* lines = page->begin();
    BASSERT(page->hasFreeLines(lock));
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

    auto findSmallBumpRange = [&](size_t& lineNumber) {
        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (!lines[lineNumber].refCount(lock)) {
                if (pageMetadata[lineNumber].objectCount)
                    return true;
            }
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](size_t& lineNumber) -> BumpRange {
        char* begin = lines[lineNumber].begin() + pageMetadata[lineNumber].startOffset;
        unsigned short objectCount = 0;

        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (lines[lineNumber].refCount(lock))
                break;

            if (!pageMetadata[lineNumber].objectCount)
                continue;

            objectCount += pageMetadata[lineNumber].objectCount;
            lines[lineNumber].ref(lock, pageMetadata[lineNumber].objectCount);
            page->ref(lock);
        }
        return { begin, objectCount };
    };

    size_t lineNumber = 0;
    for (;;) {
        if (!findSmallBumpRange(lineNumber)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(lineNumber);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

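// Refill path for size classes that span whole lines and therefore have no per-line
// metadata: steps through the page one object at a time and bundles consecutive objects
// whose lines are unreferenced into bump ranges.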
void Heap::allocateSmallBumpRangesByObject(
    std::lock_guard<StaticMutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    size_t size = allocator.size();
    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    BASSERT(page->hasFreeLines(lock));

    auto findSmallBumpRange = [&](Object& it, Object& end) {
        for ( ; it + size <= end; it = it + size) {
            if (!it.line()->refCount(lock))
                return true;
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](Object& it, Object& end) -> BumpRange {
        char* begin = it.address();
        unsigned short objectCount = 0;
        for ( ; it + size <= end; it = it + size) {
            if (it.line()->refCount(lock))
                break;

            ++objectCount;
            it.line()->ref(lock);
            it.page()->ref(lock);
        }
        return { begin, objectCount };
    };

    Object it(page->begin()->begin());
    Object end(it + pageSize(m_pageClasses[sizeClass]));
    for (;;) {
        if (!findSmallBumpRange(it, end)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(it, end);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

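// Trims a free large range to the requested alignment and size. A misaligned prefix, and a
// suffix large enough to be worth keeping, go back on the large free list; the retained
// range is committed (Physical) or decommitted (Virtual) and recorded as allocated.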
LargeRange Heap::splitAndAllocate(LargeRange& range, size_t alignment, size_t size, AllocationKind allocationKind)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    LargeRange prev;
    LargeRange next;

    size_t alignmentMask = alignment - 1;
    if (test(range.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, range.begin()) - range.begin();
        std::pair<LargeRange, LargeRange> pair = range.split(prefixSize);
        prev = pair.first;
        range = pair.second;
    }

    if (range.size() - size > size / pageSizeWasteFactor) {
        std::pair<LargeRange, LargeRange> pair = range.split(size);
        range = pair.first;
        next = pair.second;
    }

    switch (allocationKind) {
    case AllocationKind::Virtual:
        if (range.physicalSize())
            vmDeallocatePhysicalPagesSloppy(range.begin(), range.size());
        break;

    case AllocationKind::Physical:
        if (range.physicalSize() < range.size()) {
            m_scavenger->scheduleIfUnderMemoryPressure(range.size());

            vmAllocatePhysicalPagesSloppy(range.begin() + range.physicalSize(), range.size() - range.physicalSize());
            range.setPhysicalSize(range.size());
        }
        break;
    }

    if (prev)
        m_largeFree.add(prev);

    if (next)
        m_largeFree.add(next);

    m_objectTypes.set(Chunk::get(range.begin()), ObjectType::Large);

    m_largeAllocated.set(range.begin(), range.size());
    return range;
}

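// Large allocation path. After forwarding to the debug heap when one is active, rounds
// size and alignment up to largeAlignment (failing on overflow) and carves a range out of
// the large free list. If the free list cannot satisfy the request, a gigacaged heap fails
// outright, since its address space is fixed, while other heaps grow by asking the VMHeap
// for more virtual memory.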
void* Heap::tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size, AllocationKind allocationKind)
{
    RELEASE_BASSERT(isActiveHeapKind(m_kind));

    BASSERT(isPowerOfTwo(alignment));

    if (m_debugHeap)
        return m_debugHeap->memalignLarge(alignment, size, allocationKind);

    m_scavenger->didStartGrowing();

    size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
    if (roundedSize < size) // Check for overflow.
        return nullptr;
    size = roundedSize;

    size_t roundedAlignment = roundUpToMultipleOf<largeAlignment>(alignment);
    if (roundedAlignment < alignment) // Check for overflow.
        return nullptr;
    alignment = roundedAlignment;

    LargeRange range = m_largeFree.remove(alignment, size);
    if (!range) {
        if (usingGigacage())
            return nullptr;

        range = PerProcess<VMHeap>::get()->tryAllocateLargeChunk(alignment, size, allocationKind);
        if (!range)
            return nullptr;

        m_largeFree.add(range);

        range = m_largeFree.remove(alignment, size);
    }

    return splitAndAllocate(range, alignment, size, allocationKind).begin();
}

void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, AllocationKind allocationKind)
{
    void* result = tryAllocateLarge(lock, alignment, size, allocationKind);
    RELEASE_BASSERT(result);
    return result;
}

bool Heap::isLarge(std::lock_guard<StaticMutex>&, void* object)
{
    return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large;
}

size_t Heap::largeSize(std::lock_guard<StaticMutex>&, void* object)
{
    return m_largeAllocated.get(object);
}

void Heap::shrinkLarge(std::lock_guard<StaticMutex>&, const Range& object, size_t newSize)
{
    BASSERT(object.size() > newSize);

    size_t size = m_largeAllocated.remove(object.begin());
    LargeRange range = LargeRange(object, size);
    splitAndAllocate(range, alignment, newSize, AllocationKind::Physical);

    m_scavenger->schedule(size);
}

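// Returns a large allocation to the free list. Physical allocations keep their pages
// committed so the range can be reused cheaply; the scavenger decides when to decommit.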
void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object, AllocationKind allocationKind)
{
    if (m_debugHeap)
        return m_debugHeap->freeLarge(object, allocationKind);

    size_t size = m_largeAllocated.remove(object);
    m_largeFree.add(LargeRange(object, size, allocationKind == AllocationKind::Physical ? size : 0));
    m_scavenger->schedule(size);
}

} // namespace bmalloc