Source/bmalloc/bmalloc/Heap.cpp (revision bcd5310e87a7f1feba06b11a738f1723834b738b)

/*
 * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"
#include "BumpAllocator.h"
#include "LargeChunk.h"
#include "LargeObject.h"
#include "Line.h"
#include "MediumChunk.h"
#include "Page.h"
#include "PerProcess.h"
#include "SmallChunk.h"
#include <thread>

namespace bmalloc {

Heap::Heap(std::lock_guard<StaticMutex>&)
    : m_largeObjects(VMState::HasPhysical::True)
    , m_isAllocatingPages(false)
    , m_scavenger(*this, &Heap::concurrentScavenge)
{
    initializeLineMetadata();
}

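// Precompute, for each size class and each line in a page, the offset of the first object that
// begins in that line and how many objects begin there. An object that starts near the end of one
// line may spill into the next; startOffset carries that spill forward. Illustrative numbers only
// (not the real constants): with 256-byte lines and 96-byte objects, line 0 begins objects at
// offsets 0, 96 and 192; the last one spills 32 bytes into line 1, so line 1's startOffset is 32.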
void Heap::initializeLineMetadata()
{
    for (unsigned short size = alignment; size <= smallMax; size += alignment) {
        unsigned short startOffset = 0;
        for (size_t lineNumber = 0; lineNumber < SmallPage::lineCount - 1; ++lineNumber) {
            unsigned short objectCount;
            unsigned short remainder;
            divideRoundingUp(static_cast<unsigned short>(SmallPage::lineSize - startOffset), size, objectCount, remainder);
            BASSERT(objectCount);
            m_smallLineMetadata[sizeClass(size)][lineNumber] = { startOffset, objectCount };
            startOffset = remainder ? size - remainder : 0;
        }

        // The last line in the page rounds down instead of up because it's not allowed to overlap into its neighbor.
        unsigned short objectCount = static_cast<unsigned short>((SmallPage::lineSize - startOffset) / size);
        m_smallLineMetadata[sizeClass(size)][SmallPage::lineCount - 1] = { startOffset, objectCount };
    }

    for (unsigned short size = smallMax + alignment; size <= mediumMax; size += alignment) {
        unsigned short startOffset = 0;
        for (size_t lineNumber = 0; lineNumber < MediumPage::lineCount - 1; ++lineNumber) {
            unsigned short objectCount;
            unsigned short remainder;
            divideRoundingUp(static_cast<unsigned short>(MediumPage::lineSize - startOffset), size, objectCount, remainder);
            BASSERT(objectCount);
            m_mediumLineMetadata[sizeClass(size)][lineNumber] = { startOffset, objectCount };
            startOffset = remainder ? size - remainder : 0;
        }

        // The last line in the page rounds down instead of up because it's not allowed to overlap into its neighbor.
        unsigned short objectCount = static_cast<unsigned short>((MediumPage::lineSize - startOffset) / size);
        m_mediumLineMetadata[sizeClass(size)][MediumPage::lineCount - 1] = { startOffset, objectCount };
    }
}

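// Entry point for the asynchronous scavenger task: take the per-process Heap lock and return free
// memory to the OS, backing off whenever the heap is actively allocating pages.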
void Heap::concurrentScavenge()
{
    std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
    scavenge(lock, scavengeSleepDuration);
}

void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);

    scavengeSmallPages(lock, sleepDuration);
    scavengeMediumPages(lock, sleepDuration);
    scavengeLargeObjects(lock, sleepDuration);

    sleep(lock, sleepDuration);
}

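// Each scavenge helper drains one free list, handing pages (or free large objects) back to the VM
// heap and re-checking m_isAllocatingPages between items so it yields to foreground allocation.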
void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (m_smallPages.size()) {
        m_vmHeap.deallocateSmallPage(lock, m_smallPages.pop());
        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
    }
}

void Heap::scavengeMediumPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (m_mediumPages.size()) {
        m_vmHeap.deallocateMediumPage(lock, m_mediumPages.pop());
        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
    }
}

void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (LargeObject largeObject = m_largeObjects.takeGreedy()) {
        m_vmHeap.deallocateLargeObject(lock, largeObject);
        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
    }
}

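// Refill a bump allocator for one size class: walk the lines of a small page, coalesce each run of
// contiguous free lines into a single bump range, and hand a range to the allocator if it is empty
// or to the range cache otherwise.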
void Heap::allocateSmallBumpRanges(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpAllocator& allocator, BumpRangeCache& rangeCache)
{
    BASSERT(!rangeCache.size());
    SmallPage* page = allocateSmallPage(lock, sizeClass);
    SmallLine* lines = page->begin();

    // Due to overlap from the previous line, the last line in the page may not be able to fit any objects.
    size_t end = SmallPage::lineCount;
    if (!m_smallLineMetadata[sizeClass][SmallPage::lineCount - 1].objectCount)
        --end;

    // Find a free line.
    for (size_t lineNumber = 0; lineNumber < end; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            m_smallPagesWithFreeLines[sizeClass].push(page);
            return;
        }

        LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock);

        // Merge with subsequent free lines.
        while (++lineNumber < end) {
            if (lines[lineNumber].refCount(lock))
                break;

            LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        if (!allocator.canAllocate())
            allocator.refill({ begin, objectCount });
        else
            rangeCache.push({ begin, objectCount });
    }
}

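// Medium-page analog of allocateSmallBumpRanges; only the page, line, and metadata types differ.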
void Heap::allocateMediumBumpRanges(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpAllocator& allocator, BumpRangeCache& rangeCache)
{
    MediumPage* page = allocateMediumPage(lock, sizeClass);
    BASSERT(!rangeCache.size());
    MediumLine* lines = page->begin();

    // Due to overlap from the previous line, the last line in the page may not be able to fit any objects.
    size_t end = MediumPage::lineCount;
    if (!m_mediumLineMetadata[sizeClass][MediumPage::lineCount - 1].objectCount)
        --end;

    // Find a free line.
    for (size_t lineNumber = 0; lineNumber < end; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            m_mediumPagesWithFreeLines[sizeClass].push(page);
            return;
        }

        LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock);

        // Merge with subsequent free lines.
        while (++lineNumber < end) {
            if (lines[lineNumber].refCount(lock))
                break;

            LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        if (!allocator.canAllocate())
            allocator.refill({ begin, objectCount });
        else
            rangeCache.push({ begin, objectCount });
    }
}

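// Prefer a partially free page already assigned to this size class; otherwise reuse a fully free
// cached page, and only as a last resort take a fresh page from the VM heap, marking the heap as
// actively allocating so the scavenger backs off.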
SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    Vector<SmallPage*>& smallPagesWithFreeLines = m_smallPagesWithFreeLines[sizeClass];
    while (smallPagesWithFreeLines.size()) {
        SmallPage* page = smallPagesWithFreeLines.pop();
        if (!page->refCount(lock) || page->sizeClass() != sizeClass) // Page was promoted to the pages list.
            continue;
        return page;
    }

    SmallPage* page = [this, sizeClass]() {
        if (m_smallPages.size())
            return m_smallPages.pop();

        m_isAllocatingPages = true;
        return m_vmHeap.allocateSmallPage();
    }();

    page->setSizeClass(sizeClass);
    return page;
}

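// Medium-page analog of allocateSmallPage.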
MediumPage* Heap::allocateMediumPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    Vector<MediumPage*>& mediumPagesWithFreeLines = m_mediumPagesWithFreeLines[sizeClass];
    while (mediumPagesWithFreeLines.size()) {
        MediumPage* page = mediumPagesWithFreeLines.pop();
        if (!page->refCount(lock) || page->sizeClass() != sizeClass) // Page was promoted to the pages list.
            continue;
        return page;
    }

    MediumPage* page = [this, sizeClass]() {
        if (m_mediumPages.size())
            return m_mediumPages.pop();

        m_isAllocatingPages = true;
        return m_vmHeap.allocateMediumPage();
    }();

    page->setSizeClass(sizeClass);
    return page;
}

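// Called when the last object in a line is freed. The page's reference count (sampled before the
// deref) tells us what changed: if this was the page's first free line, the page becomes available
// to allocateSmallPage again; if it was the last, the whole page is now free and the scavenger runs.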
void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, SmallLine* line)
{
    BASSERT(!line->refCount(lock));
    SmallPage* page = SmallPage::get(line);
    size_t refCount = page->refCount(lock);
    page->deref(lock);

    switch (refCount) {
    case SmallPage::lineCount: {
        // First free line in the page.
        m_smallPagesWithFreeLines[page->sizeClass()].push(page);
        break;
    }
    case 1: {
        // Last free line in the page.
        m_smallPages.push(page);
        m_scavenger.run();
        break;
    }
    }
}

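// Medium-line analog of deallocateSmallLine.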
void Heap::deallocateMediumLine(std::lock_guard<StaticMutex>& lock, MediumLine* line)
{
    BASSERT(!line->refCount(lock));
    MediumPage* page = MediumPage::get(line);
    size_t refCount = page->refCount(lock);
    page->deref(lock);

    switch (refCount) {
    case MediumPage::lineCount: {
        // First free line in the page.
        m_mediumPagesWithFreeLines[page->sizeClass()].push(page);
        break;
    }
    case 1: {
        // Last free line in the page.
        m_mediumPages.push(page);
        m_scavenger.run();
        break;
    }
    }
}

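// XLarge objects bypass the large-object free list entirely: each one is its own VM allocation,
// tracked only as a Range in m_xLargeObjects.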
void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size)
{
    void* result = tryAllocateXLarge(lock, alignment, size);
    RELEASE_BASSERT(result);
    return result;
}

void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t size)
{
    return allocateXLarge(lock, superChunkSize, size);
}

void* Heap::tryAllocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size)
{
    BASSERT(isPowerOfTwo(alignment));
    BASSERT(alignment >= superChunkSize);
    BASSERT(size == roundUpToMultipleOf<xLargeAlignment>(size));

    void* result = tryVMAllocate(alignment, size);
    if (!result)
        return nullptr;
    m_xLargeObjects.push(Range(result, size));
    return result;
}

Range& Heap::findXLarge(std::unique_lock<StaticMutex>&, void* object)
{
    for (auto& range : m_xLargeObjects) {
        if (range.begin() != object)
            continue;
        return range;
    }

    RELEASE_BASSERT(false);
    return *static_cast<Range*>(nullptr); // Silence compiler error.
}

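// Deallocation drops the heap lock around vmDeallocate so returning the memory to the system does
// not hold up other heap operations.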
void Heap::deallocateXLarge(std::unique_lock<StaticMutex>& lock, void* object)
{
    Range toDeallocate = m_xLargeObjects.pop(&findXLarge(lock, object));

    lock.unlock();
    vmDeallocate(toDeallocate.begin(), toDeallocate.size());
    lock.lock();
}

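// Carve a free large object down to the requested size. If the leftover tail is big enough to stand
// alone, it goes back into the free list; otherwise the allocation keeps the slack.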
inline LargeObject& Heap::splitAndAllocate(LargeObject& largeObject, size_t size)
{
    BASSERT(largeObject.isFree());

    LargeObject nextLargeObject;

    if (largeObject.size() - size > largeMin) {
        std::pair<LargeObject, LargeObject> split = largeObject.split(size);
        largeObject = split.first;
        nextLargeObject = split.second;
    }

    largeObject.setFree(false);

    if (nextLargeObject) {
        BASSERT(!nextLargeObject.nextCanMerge());
        m_largeObjects.insert(nextLargeObject);
    }

    return largeObject;
}

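// Aligned variant: if the object does not already start at the requested alignment, first split off
// a prefix so the remainder does, then trim the tail as above. Both leftover pieces are merged with
// their free neighbors and reinserted into the free list.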
inline LargeObject& Heap::splitAndAllocate(LargeObject& largeObject, size_t alignment, size_t size)
{
    LargeObject prevLargeObject;
    LargeObject nextLargeObject;

    size_t alignmentMask = alignment - 1;
    if (test(largeObject.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, largeObject.begin() + largeMin) - largeObject.begin();
        std::pair<LargeObject, LargeObject> pair = largeObject.split(prefixSize);
        prevLargeObject = pair.first;
        largeObject = pair.second;
    }

    BASSERT(largeObject.isFree());

    if (largeObject.size() - size > largeMin) {
        std::pair<LargeObject, LargeObject> split = largeObject.split(size);
        largeObject = split.first;
        nextLargeObject = split.second;
    }

    largeObject.setFree(false);

    if (prevLargeObject) {
        LargeObject merged = prevLargeObject.merge();
        m_largeObjects.insert(merged);
    }

    if (nextLargeObject) {
        LargeObject merged = nextLargeObject.merge();
        m_largeObjects.insert(merged);
    }

    return largeObject;
}

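// Serve large allocations from the free list when possible, falling back to the VM heap. Objects
// that are only virtually mapped are committed to physical memory up front (see the comment below).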
void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t size)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));

    LargeObject largeObject = m_largeObjects.take(size);
    if (!largeObject)
        largeObject = m_vmHeap.allocateLargeObject(size);

    if (largeObject.vmState().hasVirtual()) {
        m_isAllocatingPages = true;
        // We commit before we split in order to avoid split/merge commit/decommit churn.
        vmAllocatePhysicalPagesSloppy(largeObject.begin(), largeObject.size());
        largeObject.setVMState(VMState::Physical);
    }

    largeObject = splitAndAllocate(largeObject, size);

    return largeObject.begin();
}

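// Aligned variant. unalignedSize is, as far as the assertions and callers suggest, the worst-case
// footprint needed to guarantee an aligned object of the requested size can be carved out of
// whatever block is found.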
void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size, size_t unalignedSize)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
    BASSERT(unalignedSize <= largeMax);
    BASSERT(unalignedSize >= largeMin);
    BASSERT(unalignedSize == roundUpToMultipleOf<largeAlignment>(unalignedSize));
    BASSERT(alignment <= largeChunkSize / 2);
    BASSERT(alignment >= largeAlignment);
    BASSERT(isPowerOfTwo(alignment));

    LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
    if (!largeObject)
        largeObject = m_vmHeap.allocateLargeObject(alignment, size, unalignedSize);

    if (largeObject.vmState().hasVirtual()) {
        m_isAllocatingPages = true;
        // We commit before we split in order to avoid split/merge commit/decommit churn.
        vmAllocatePhysicalPagesSloppy(largeObject.begin(), largeObject.size());
        largeObject.setVMState(VMState::Physical);
    }

    largeObject = splitAndAllocate(largeObject, alignment, size);

    return largeObject.begin();
}

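// Freeing a large object marks it free, eagerly merges it with any free neighbors, and wakes the
// scavenger so idle memory can eventually be returned to the OS.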
void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, const LargeObject& largeObject)
{
    BASSERT(!largeObject.isFree());
    largeObject.setFree(true);

    LargeObject merged = largeObject.merge();
    m_largeObjects.insert(merged);
    m_scavenger.run();
}

void Heap::deallocateLarge(std::lock_guard<StaticMutex>& lock, void* object)
{
    LargeObject largeObject(object);
    deallocateLarge(lock, largeObject);
}

} // namespace bmalloc