bmalloc: Refactored SegregatedFreeList and BoundaryTag::init
Source/bmalloc/bmalloc/Heap.cpp
/*
 * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"
#include "LargeChunk.h"
#include "LargeObject.h"
#include "Line.h"
#include "MediumChunk.h"
#include "Page.h"
#include "PerProcess.h"
#include "SmallChunk.h"
#include <thread>

namespace bmalloc {

static inline void sleep(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds duration)
{
    if (duration == std::chrono::milliseconds(0))
        return;

    lock.unlock();
    std::this_thread::sleep_for(duration);
    lock.lock();
}

Heap::Heap(std::lock_guard<StaticMutex>&)
    : m_isAllocatingPages(false)
    , m_scavenger(*this, &Heap::concurrentScavenge)
{
    initializeLineMetadata();
}

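// Precomputes, for every size class, per-line metadata: the offset at which
// the first whole object in a line begins (an object allocated on the previous
// line may spill into it) and the number of objects that begin on that line.
// refillSmallBumpRangeCache() and refillMediumBumpRangeCache() use this table
// to carve free lines into bump ranges.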
void Heap::initializeLineMetadata()
{
    for (unsigned short size = alignment; size <= smallMax; size += alignment) {
        unsigned short startOffset = 0;
        for (size_t lineNumber = 0; lineNumber < SmallPage::lineCount - 1; ++lineNumber) {
            unsigned short objectCount;
            unsigned short remainder;
            divideRoundingUp(static_cast<unsigned short>(SmallPage::lineSize - startOffset), size, objectCount, remainder);
            BASSERT(objectCount);
            m_smallLineMetadata[sizeClass(size)][lineNumber] = { startOffset, objectCount };
            startOffset = remainder ? size - remainder : 0;
        }

        // The last line in the page rounds down instead of up because it's not allowed to overlap into its neighbor.
        unsigned short objectCount = static_cast<unsigned short>((SmallPage::lineSize - startOffset) / size);
        m_smallLineMetadata[sizeClass(size)][SmallPage::lineCount - 1] = { startOffset, objectCount };
    }

    for (unsigned short size = smallMax + alignment; size <= mediumMax; size += alignment) {
        unsigned short startOffset = 0;
        for (size_t lineNumber = 0; lineNumber < MediumPage::lineCount - 1; ++lineNumber) {
            unsigned short objectCount;
            unsigned short remainder;
            divideRoundingUp(static_cast<unsigned short>(MediumPage::lineSize - startOffset), size, objectCount, remainder);
            BASSERT(objectCount);
            m_mediumLineMetadata[sizeClass(size)][lineNumber] = { startOffset, objectCount };
            startOffset = remainder ? size - remainder : 0;
        }

        // The last line in the page rounds down instead of up because it's not allowed to overlap into its neighbor.
        unsigned short objectCount = static_cast<unsigned short>((MediumPage::lineSize - startOffset) / size);
        m_mediumLineMetadata[sizeClass(size)][MediumPage::lineCount - 1] = { startOffset, objectCount };
    }
}

void Heap::concurrentScavenge()
{
    std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
    scavenge(lock, scavengeSleepDuration);
}

void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    scavengeSmallPages(lock, sleepDuration);
    scavengeMediumPages(lock, sleepDuration);
    scavengeLargeRanges(lock, sleepDuration);

    sleep(lock, sleepDuration);
}

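// Each scavenge phase below returns cached memory to the VM heap, but backs
// off (clearing m_isAllocatingPages and sleeping) whenever the allocator has
// set that flag since the last pass, so scavenging yields to an actively
// allocating application.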
void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (1) {
        if (m_isAllocatingPages) {
            m_isAllocatingPages = false;

            sleep(lock, sleepDuration);
            continue;
        }

        if (!m_smallPages.size())
            return;
        m_vmHeap.deallocateSmallPage(lock, m_smallPages.pop());
    }
}

void Heap::scavengeMediumPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (1) {
        if (m_isAllocatingPages) {
            m_isAllocatingPages = false;

            sleep(lock, sleepDuration);
            continue;
        }

        if (!m_mediumPages.size())
            return;
        m_vmHeap.deallocateMediumPage(lock, m_mediumPages.pop());
    }
}

void Heap::scavengeLargeRanges(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (1) {
        if (m_isAllocatingPages) {
            m_isAllocatingPages = false;

            sleep(lock, sleepDuration);
            continue;
        }

        LargeObject largeObject = m_largeObjects.takeGreedy(vmPageSize);
        if (!largeObject)
            return;
        m_vmHeap.deallocateLargeRange(lock, largeObject);
    }
}

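// Refills a bump range cache by taking a page for this size class and turning
// each run of contiguous free lines into a single bump range, ref'ing the
// lines and the page as it goes.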
void Heap::refillSmallBumpRangeCache(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpRangeCache& rangeCache)
{
    BASSERT(!rangeCache.size());
    SmallPage* page = allocateSmallPage(lock, sizeClass);
    SmallLine* lines = page->begin();

    // Due to overlap from the previous line, the last line in the page may not be able to fit any objects.
    size_t end = SmallPage::lineCount;
    if (!m_smallLineMetadata[sizeClass][SmallPage::lineCount - 1].objectCount)
        --end;

    // Find a free line.
    for (size_t lineNumber = 0; lineNumber < end; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock);

        // Merge with subsequent free lines.
        while (++lineNumber < end) {
            if (lines[lineNumber].refCount(lock))
                break;

            LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        rangeCache.push({ begin, objectCount });
    }
}

void Heap::refillMediumBumpRangeCache(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpRangeCache& rangeCache)
{
    MediumPage* page = allocateMediumPage(lock, sizeClass);
    BASSERT(!rangeCache.size());
    MediumLine* lines = page->begin();

    // Due to overlap from the previous line, the last line in the page may not be able to fit any objects.
    size_t end = MediumPage::lineCount;
    if (!m_mediumLineMetadata[sizeClass][MediumPage::lineCount - 1].objectCount)
        --end;

    // Find a free line.
    for (size_t lineNumber = 0; lineNumber < end; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock);

        // Merge with subsequent free lines.
        while (++lineNumber < end) {
            if (lines[lineNumber].refCount(lock))
                break;

            LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        rangeCache.push({ begin, objectCount });
    }
}

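// Prefers a page that already has free lines for this size class. Entries in
// that list can be stale (fully freed and promoted to m_smallPages, or reused
// for a different size class), so those are skipped. Otherwise falls back to
// a cached free page, and only then asks the VM heap for a fresh one.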
SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    Vector<SmallPage*>& smallPagesWithFreeLines = m_smallPagesWithFreeLines[sizeClass];
    while (smallPagesWithFreeLines.size()) {
        SmallPage* page = smallPagesWithFreeLines.pop();
        if (!page->refCount(lock) || page->sizeClass() != sizeClass) // Page was promoted to the pages list.
            continue;
        return page;
    }

    m_isAllocatingPages = true;

    SmallPage* page = [this, sizeClass]() {
        if (m_smallPages.size())
            return m_smallPages.pop();

        SmallPage* page = m_vmHeap.allocateSmallPage();
        vmAllocatePhysicalPages(page->begin()->begin(), vmPageSize);
        return page;
    }();

    page->setSizeClass(sizeClass);
    return page;
}

MediumPage* Heap::allocateMediumPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    Vector<MediumPage*>& mediumPagesWithFreeLines = m_mediumPagesWithFreeLines[sizeClass];
    while (mediumPagesWithFreeLines.size()) {
        MediumPage* page = mediumPagesWithFreeLines.pop();
        if (!page->refCount(lock) || page->sizeClass() != sizeClass) // Page was promoted to the pages list.
            continue;
        return page;
    }

    m_isAllocatingPages = true;

    MediumPage* page = [this, sizeClass]() {
        if (m_mediumPages.size())
            return m_mediumPages.pop();

        MediumPage* page = m_vmHeap.allocateMediumPage();
        vmAllocatePhysicalPages(page->begin()->begin(), vmPageSize);
        return page;
    }();

    page->setSizeClass(sizeClass);
    return page;
}

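// The page's refCount before deref() counts its in-use lines, so it tells us
// which transition this free causes: lineCount means the first line in a full
// page was just freed, and 1 means the last in-use line was freed, leaving the
// page empty and eligible for scavenging.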
void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, SmallLine* line)
{
    BASSERT(!line->refCount(lock));
    SmallPage* page = SmallPage::get(line);
    size_t refCount = page->refCount(lock);
    page->deref(lock);

    switch (refCount) {
    case SmallPage::lineCount: {
        // First free line in the page.
        m_smallPagesWithFreeLines[page->sizeClass()].push(page);
        break;
    }
    case 1: {
        // Last free line in the page.
        m_smallPages.push(page);
        m_scavenger.run();
        break;
    }
    }
}

void Heap::deallocateMediumLine(std::lock_guard<StaticMutex>& lock, MediumLine* line)
{
    BASSERT(!line->refCount(lock));
    MediumPage* page = MediumPage::get(line);
    size_t refCount = page->refCount(lock);
    page->deref(lock);

    switch (refCount) {
    case MediumPage::lineCount: {
        // First free line in the page.
        m_mediumPagesWithFreeLines[page->sizeClass()].push(page);
        break;
    }
    case 1: {
        // Last free line in the page.
        m_mediumPages.push(page);
        m_scavenger.run();
        break;
    }
    }
}

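// XLarge objects bypass the large free lists: each one is a separate VM
// allocation, tracked in m_xLargeObjects only so findXLarge() and
// deallocateXLarge() can recover its range later.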
void* Heap::allocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size)
{
    BASSERT(isPowerOfTwo(alignment));
    BASSERT(alignment >= xLargeAlignment);
    BASSERT(size == roundUpToMultipleOf<xLargeAlignment>(size));

    m_isAllocatingPages = true;

    void* result = vmAllocate(alignment, size);
    m_xLargeObjects.push(Range(result, size));
    return result;
}

void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t size)
{
    return allocateXLarge(lock, superChunkSize, size);
}

Range Heap::findXLarge(std::lock_guard<StaticMutex>&, void* object)
{
    for (auto& range : m_xLargeObjects) {
        if (range.begin() != object)
            continue;
        return range;
    }

    return Range();
}

void Heap::deallocateXLarge(std::unique_lock<StaticMutex>& lock, void* object)
{
    for (auto& range : m_xLargeObjects) {
        if (range.begin() != object)
            continue;

        Range toDeallocate = m_xLargeObjects.pop(&range);

        lock.unlock();
        vmDeallocate(toDeallocate.begin(), toDeallocate.size());
        lock.lock();

        break;
    }
}

void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, LargeObject& largeObject, size_t size)
{
    BASSERT(largeObject.isFree());

    if (largeObject.size() - size > largeMin) {
        std::pair<LargeObject, LargeObject> split = largeObject.split(size);
        largeObject = split.first;
        m_largeObjects.insert(split.second);
    }

    largeObject.setFree(false);

    if (!largeObject.hasPhysicalPages()) {
        vmAllocatePhysicalPagesSloppy(largeObject.begin(), largeObject.size());
        largeObject.setHasPhysicalPages(true);
    }

    return largeObject.begin();
}

void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t size)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));

    m_isAllocatingPages = true;

    LargeObject largeObject = m_largeObjects.take(size);
    if (!largeObject)
        largeObject = m_vmHeap.allocateLargeRange(size);

    return allocateLarge(lock, largeObject, size);
}

void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, size_t unalignedSize)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
    BASSERT(unalignedSize <= largeMax);
    BASSERT(unalignedSize >= largeMin);
    BASSERT(unalignedSize == roundUpToMultipleOf<largeAlignment>(unalignedSize));
    BASSERT(alignment <= largeChunkSize / 2);
    BASSERT(alignment >= largeAlignment);
    BASSERT(isPowerOfTwo(alignment));

    m_isAllocatingPages = true;

    LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
    if (!largeObject)
        largeObject = m_vmHeap.allocateLargeRange(alignment, size, unalignedSize);

    size_t alignmentMask = alignment - 1;
    if (!test(largeObject.begin(), alignmentMask))
        return allocateLarge(lock, largeObject, size);

    // Because we allocate VM left-to-right, we must explicitly allocate the
    // unaligned space on the left in order to break off the aligned space
    // we want in the middle.
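    // The prefix is rounded up from begin() + largeMin so that it remains a
    // valid large object; we allocate it along with the aligned object, then
    // immediately free it back to the large free list.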
    size_t prefixSize = roundUpToMultipleOf(alignment, largeObject.begin() + largeMin) - largeObject.begin();
    std::pair<LargeObject, LargeObject> pair = largeObject.split(prefixSize);
    allocateLarge(lock, pair.first, prefixSize);
    allocateLarge(lock, pair.second, size);
    deallocateLarge(lock, pair.first);
    return pair.second.begin();
}

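// A freed large object is merged with any adjacent free objects before being
// reinserted into the free list, and the scavenger is run so free ranges can
// eventually be returned to the VM heap.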
void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, const LargeObject& largeObject)
{
    BASSERT(!largeObject.isFree());
    largeObject.setFree(true);

    LargeObject merged = largeObject.merge();
    m_largeObjects.insert(merged);
    m_scavenger.run();
}

void Heap::deallocateLarge(std::lock_guard<StaticMutex>& lock, void* object)
{
    LargeObject largeObject(object);
    deallocateLarge(lock, largeObject);
}

} // namespace bmalloc