Source/bmalloc/bmalloc/Heap.cpp
/*
 * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"

#include "AvailableMemory.h"
#include "BumpAllocator.h"
#include "Chunk.h"
#include "DebugHeap.h"
#include "Environment.h"
#include "Gigacage.h"
#include "PerProcess.h"
#include "Scavenger.h"
#include "SmallLine.h"
#include "SmallPage.h"
#include "VMHeap.h"
#include "bmalloc.h"
#include <thread>

namespace bmalloc {

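// One Heap exists per HeapKind; construction runs under the Heap's static lock.
// If the environment enables the debug heap, this heap delegates to DebugHeap.
// Otherwise, when this kind is backed by a Gigacage, the entire cage is seeded
// into the large free list up front.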
Heap::Heap(HeapKind kind, std::lock_guard<StaticMutex>&)
    : m_kind(kind)
    , m_vmPageSizePhysical(vmPageSizePhysical())
    , m_debugHeap(nullptr)
{
    RELEASE_BASSERT(vmPageSizePhysical() >= smallPageSize);
    RELEASE_BASSERT(vmPageSize() >= vmPageSizePhysical());

    initializeLineMetadata();
    initializePageMetadata();

    if (PerProcess<Environment>::get()->isDebugHeapEnabled())
        m_debugHeap = PerProcess<DebugHeap>::get();
    else {
        Gigacage::ensureGigacage();
#if GIGACAGE_ENABLED
        if (usingGigacage()) {
            RELEASE_BASSERT(gigacageBasePtr());
            m_largeFree.add(LargeRange(gigacageBasePtr(), gigacageSize(), 0));
        }
#endif
    }

    m_scavenger = PerProcess<Scavenger>::get();
}

bool Heap::usingGigacage()
{
    return isGigacage(m_kind) && gigacageBasePtr();
}

void* Heap::gigacageBasePtr()
{
    return Gigacage::basePtr(gigacageKind(m_kind));
}

size_t Heap::gigacageSize()
{
    return Gigacage::size(gigacageKind(m_kind));
}

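// Precomputes, for each small size class, how many objects begin in each
// SmallLine of a page and at what offset the first one starts. An object is
// accounted to the line it begins in, even if it spills into the next line.
// For illustration (assuming 256-byte lines and a 48-byte object size): line 0
// records 6 objects at offset 0; the sixth object ends 32 bytes into line 1,
// so line 1 records its first object at offset 32.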
void Heap::initializeLineMetadata()
{
    size_t sizeClassCount = bmalloc::sizeClass(smallLineSize);
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    m_smallLineMetadata.grow(sizeClassCount * smallLineCount);

    for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
        size_t size = objectSize(sizeClass);
        LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

        size_t object = 0;
        size_t line = 0;
        while (object < m_vmPageSizePhysical) {
            line = object / smallLineSize;
            size_t leftover = object % smallLineSize;

            size_t objectCount;
            size_t remainder;
            divideRoundingUp(smallLineSize - leftover, size, objectCount, remainder);

            pageMetadata[line] = { static_cast<unsigned char>(leftover), static_cast<unsigned char>(objectCount) };

            object += objectCount * size;
        }

        // Don't allow the last object in a page to escape the page.
        if (object > m_vmPageSizePhysical) {
            BASSERT(pageMetadata[line].objectCount);
            --pageMetadata[line].objectCount;
        }
    }
}

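// Picks a physical page size for each size class: the smallest multiple of the
// VM page size that wastes no more than 1/pageSizeWasteFactor of the page when
// carved into objects of that size (size classes smaller than a line always
// use a single VM page). The result is stored as a zero-based page class, i.e.
// the page size in smallPageSize units minus one.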
void Heap::initializePageMetadata()
{
    auto computePageSize = [&](size_t sizeClass) {
        size_t size = objectSize(sizeClass);
        if (sizeClass < bmalloc::sizeClass(smallLineSize))
            return m_vmPageSizePhysical;

        for (size_t pageSize = m_vmPageSizePhysical;
            pageSize < pageSizeMax;
            pageSize += m_vmPageSizePhysical) {
            RELEASE_BASSERT(pageSize <= chunkSize / 2);
            size_t waste = pageSize % size;
            if (waste <= pageSize / pageSizeWasteFactor)
                return pageSize;
        }

        return pageSizeMax;
    };

    for (size_t i = 0; i < sizeClassCount; ++i)
        m_pageClasses[i] = (computePageSize(i) - 1) / smallPageSize;
}

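// Returns memory to the OS: decommits the physical pages behind every cached
// free small page, retires every cached chunk into the large free list, and
// then decommits the physical portion of every large free range.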
void Heap::scavenge(std::lock_guard<StaticMutex>&)
{
    for (auto& list : m_freePages) {
        for (auto* chunk : list) {
            for (auto* page : chunk->freePages()) {
                if (!page->hasPhysicalPages())
                    continue;

                vmDeallocatePhysicalPagesSloppy(page->begin()->begin(), pageSize(&list - &m_freePages[0]));

                page->setHasPhysicalPages(false);
            }
        }
    }

    for (auto& list : m_chunkCache) {
        while (!list.isEmpty())
            deallocateSmallChunk(list.pop(), &list - &m_chunkCache[0]);
    }

    for (auto& range : m_largeFree) {
        vmDeallocatePhysicalPagesSloppy(range.begin(), range.size());

        range.setPhysicalSize(0);
    }
}

void Heap::deallocateLineCache(std::lock_guard<StaticMutex>&, LineCache& lineCache)
{
    for (auto& list : lineCache) {
        while (!list.isEmpty()) {
            size_t sizeClass = &list - &lineCache[0];
            m_lineCache[sizeClass].push(list.popFront());
        }
    }
}

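// Ensures m_freePages[pageClass] has a chunk with free pages, preferring the
// chunk cache and otherwise carving a fresh Chunk out of a large allocation;
// newly created chunks wake the scavenger.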
void Heap::allocateSmallChunk(std::lock_guard<StaticMutex>& lock, size_t pageClass)
{
    size_t pageSize = bmalloc::pageSize(pageClass);

    Chunk* chunk = [&]() {
        if (!m_chunkCache[pageClass].isEmpty())
            return m_chunkCache[pageClass].pop();

        void* memory = allocateLarge(lock, chunkSize, chunkSize);

        Chunk* chunk = new (memory) Chunk(pageSize);

        m_objectTypes.set(chunk, ObjectType::Small);

        forEachPage(chunk, pageSize, [&](SmallPage* page) {
            page->setHasPhysicalPages(true);
            page->setHasFreeLines(lock, true);
            chunk->freePages().push(page);
        });

        m_scavenger->schedule(0);

        return chunk;
    }();

    m_freePages[pageClass].push(chunk);
}

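// Retires an empty small chunk by handing its memory back to the large free
// list. The range is counted as physically backed only if every page in the
// chunk still has physical pages.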
void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
{
    m_objectTypes.set(chunk, ObjectType::Large);

    size_t size = m_largeAllocated.remove(chunk);

    bool hasPhysicalPages = true;
    forEachPage(chunk, pageSize(pageClass), [&](SmallPage* page) {
        if (!page->hasPhysicalPages())
            hasPhysicalPages = false;
    });
    size_t physicalSize = hasPhysicalPages ? size : 0;

    m_largeFree.add(LargeRange(chunk, size, physicalSize));
}

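// Returns a page with free lines for the given size class, checking the
// caller's line cache, then the heap's line cache, and finally a chunk's free
// page list (allocating a new chunk, and committing physical pages, as needed).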
SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass, LineCache& lineCache)
{
    if (!lineCache[sizeClass].isEmpty())
        return lineCache[sizeClass].popFront();

    if (!m_lineCache[sizeClass].isEmpty())
        return m_lineCache[sizeClass].popFront();

    m_scavenger->didStartGrowing();

    SmallPage* page = [&]() {
        size_t pageClass = m_pageClasses[sizeClass];

        if (m_freePages[pageClass].isEmpty())
            allocateSmallChunk(lock, pageClass);

        Chunk* chunk = m_freePages[pageClass].tail();

        chunk->ref();

        SmallPage* page = chunk->freePages().pop();
        if (chunk->freePages().isEmpty())
            m_freePages[pageClass].remove(chunk);

        if (!page->hasPhysicalPages()) {
            m_scavenger->scheduleIfUnderMemoryPressure(pageSize(pageClass));

            vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize(pageClass));
            page->setHasPhysicalPages(true);
        }

        return page;
    }();

    page->setSizeClass(sizeClass);
    return page;
}

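// Runs when the last object in a line is freed. The page regains its
// free-lines status; once the page itself is empty it goes back to its chunk,
// and a fully empty chunk replaces the previous occupant of the chunk cache.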
void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, Object object, LineCache& lineCache)
{
    BASSERT(!object.line()->refCount(lock));
    SmallPage* page = object.page();
    page->deref(lock);

    if (!page->hasFreeLines(lock)) {
        page->setHasFreeLines(lock, true);
        lineCache[page->sizeClass()].push(page);
    }

    if (page->refCount(lock))
        return;

    size_t sizeClass = page->sizeClass();
    size_t pageClass = m_pageClasses[sizeClass];

    List<SmallPage>::remove(page); // 'page' may be in any thread's line cache.

    Chunk* chunk = Chunk::get(page);
    if (chunk->freePages().isEmpty())
        m_freePages[pageClass].push(chunk);
    chunk->freePages().push(page);

    chunk->deref();

    if (!chunk->refCount()) {
        m_freePages[pageClass].remove(chunk);

        if (!m_chunkCache[pageClass].isEmpty())
            deallocateSmallChunk(m_chunkCache[pageClass].pop(), pageClass);

        m_chunkCache[pageClass].push(chunk);
    }

    m_scavenger->schedule(pageSize(pageClass));
}

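// Refills a bump allocator for size classes tracked per line: using the
// precomputed line metadata, each maximal run of unreferenced lines becomes
// one BumpRange. The first range refills the allocator directly; further
// ranges are stashed in the range cache until it is full.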
void Heap::allocateSmallBumpRangesByMetadata(
    std::lock_guard<StaticMutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    SmallLine* lines = page->begin();
    BASSERT(page->hasFreeLines(lock));
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

    auto findSmallBumpRange = [&](size_t& lineNumber) {
        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (!lines[lineNumber].refCount(lock)) {
                if (pageMetadata[lineNumber].objectCount)
                    return true;
            }
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](size_t& lineNumber) -> BumpRange {
        char* begin = lines[lineNumber].begin() + pageMetadata[lineNumber].startOffset;
        unsigned short objectCount = 0;

        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (lines[lineNumber].refCount(lock))
                break;

            if (!pageMetadata[lineNumber].objectCount)
                continue;

            objectCount += pageMetadata[lineNumber].objectCount;
            lines[lineNumber].ref(lock, pageMetadata[lineNumber].objectCount);
            page->ref(lock);
        }
        return { begin, objectCount };
    };

    size_t lineNumber = 0;
    for (;;) {
        if (!findSmallBumpRange(lineNumber)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(lineNumber);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

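// Same idea as above, but the variant for larger size classes, where objects
// are big enough that per-line metadata isn't used: the page is walked object
// by object, and contiguous unreferenced objects are grouped into bump ranges.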
void Heap::allocateSmallBumpRangesByObject(
    std::lock_guard<StaticMutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache,
    LineCache& lineCache)
{
    size_t size = allocator.size();
    SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
    BASSERT(page->hasFreeLines(lock));

    auto findSmallBumpRange = [&](Object& it, Object& end) {
        for ( ; it + size <= end; it = it + size) {
            if (!it.line()->refCount(lock))
                return true;
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](Object& it, Object& end) -> BumpRange {
        char* begin = it.address();
        unsigned short objectCount = 0;
        for ( ; it + size <= end; it = it + size) {
            if (it.line()->refCount(lock))
                break;

            ++objectCount;
            it.line()->ref(lock);
            it.page()->ref(lock);
        }
        return { begin, objectCount };
    };

    Object it(page->begin()->begin());
    Object end(it + pageSize(m_pageClasses[sizeClass]));
    for (;;) {
        if (!findSmallBumpRange(it, end)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            lineCache[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(it, end);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

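// Carves an aligned allocation of the requested size out of a free large
// range, returning any leading or trailing remainder to the large free list.
// Virtual allocations give up any committed memory; physical allocations are
// committed in full.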
LargeRange Heap::splitAndAllocate(LargeRange& range, size_t alignment, size_t size, AllocationKind allocationKind)
{
    LargeRange prev;
    LargeRange next;

    size_t alignmentMask = alignment - 1;
    if (test(range.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, range.begin()) - range.begin();
        std::pair<LargeRange, LargeRange> pair = range.split(prefixSize);
        prev = pair.first;
        range = pair.second;
    }

    if (range.size() - size > size / pageSizeWasteFactor) {
        std::pair<LargeRange, LargeRange> pair = range.split(size);
        range = pair.first;
        next = pair.second;
    }

    switch (allocationKind) {
    case AllocationKind::Virtual:
        if (range.physicalSize())
            vmDeallocatePhysicalPagesSloppy(range.begin(), range.size());
        break;

    case AllocationKind::Physical:
        if (range.physicalSize() < range.size()) {
            m_scavenger->scheduleIfUnderMemoryPressure(range.size());

            vmAllocatePhysicalPagesSloppy(range.begin() + range.physicalSize(), range.size() - range.physicalSize());
            range.setPhysicalSize(range.size());
        }
        break;
    }

    if (prev)
        m_largeFree.add(prev);

    if (next)
        m_largeFree.add(next);

    m_objectTypes.set(Chunk::get(range.begin()), ObjectType::Large);

    m_largeAllocated.set(range.begin(), range.size());
    return range;
}

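// The large allocation slow path. Size and alignment are rounded up to
// largeAlignment (with overflow checks); on a free-list miss the VMHeap
// supplies a fresh chunk-aligned range, except for a Gigacage-backed heap,
// which cannot grow beyond its cage and fails instead.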
void* Heap::tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size, AllocationKind allocationKind)
{
    BASSERT(isPowerOfTwo(alignment));

    if (m_debugHeap)
        return m_debugHeap->memalignLarge(alignment, size, allocationKind);

    m_scavenger->didStartGrowing();

    size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
    if (roundedSize < size) // Check for overflow.
        return nullptr;
    size = roundedSize;

    size_t roundedAlignment = roundUpToMultipleOf<largeAlignment>(alignment);
    if (roundedAlignment < alignment) // Check for overflow.
        return nullptr;
    alignment = roundedAlignment;

    LargeRange range = m_largeFree.remove(alignment, size);
    if (!range) {
        if (usingGigacage())
            return nullptr;

        range = PerProcess<VMHeap>::get()->tryAllocateLargeChunk(alignment, size, allocationKind);
        if (!range)
            return nullptr;

        m_largeFree.add(range);

        range = m_largeFree.remove(alignment, size);
    }

    return splitAndAllocate(range, alignment, size, allocationKind).begin();
}

void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, AllocationKind allocationKind)
{
    void* result = tryAllocateLarge(lock, alignment, size, allocationKind);
    RELEASE_BASSERT(result);
    return result;
}

bool Heap::isLarge(std::lock_guard<StaticMutex>&, void* object)
{
    return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large;
}

size_t Heap::largeSize(std::lock_guard<StaticMutex>&, void* object)
{
    return m_largeAllocated.get(object);
}

void Heap::shrinkLarge(std::lock_guard<StaticMutex>&, const Range& object, size_t newSize)
{
    BASSERT(object.size() > newSize);

    size_t size = m_largeAllocated.remove(object.begin());
    LargeRange range = LargeRange(object, size);
    splitAndAllocate(range, alignment, newSize, AllocationKind::Physical);

    m_scavenger->schedule(size);
}

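// Returns a large allocation to the large free list and lets the scavenger
// decide when to hand the physical memory back to the OS.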
void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object, AllocationKind allocationKind)
{
    if (m_debugHeap)
        return m_debugHeap->freeLarge(object, allocationKind);

    size_t size = m_largeAllocated.remove(object);
    m_largeFree.add(LargeRange(object, size, allocationKind == AllocationKind::Physical ? size : 0));
    m_scavenger->schedule(size);
}

} // namespace bmalloc