bmalloc: Remove the concept of medium objects
Source/bmalloc/bmalloc/Heap.cpp
/*
 * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"
#include "BumpAllocator.h"
#include "LargeChunk.h"
#include "LargeObject.h"
#include "Line.h"
#include "Page.h"
#include "PerProcess.h"
#include "SmallChunk.h"
#include <thread>

namespace bmalloc {

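// The Heap is shared process-wide through PerProcess<Heap>. Each member
// function takes the heap lock as a parameter to document that its caller
// already holds PerProcess<Heap>::mutex().
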
Heap::Heap(std::lock_guard<StaticMutex>&)
    : m_largeObjects(VMState::HasPhysical::True)
    , m_isAllocatingPages(false)
    , m_scavenger(*this, &Heap::concurrentScavenge)
{
    initializeLineMetadata();
}

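// Precomputes, for each small size class, how objects of that size pack into
// the lines of a page: for each line, the byte offset where the first object
// begins (skipping the tail of an object that started on a previous line) and
// the number of objects that begin in that line.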
void Heap::initializeLineMetadata()
{
    // We assume that m_smallLineMetadata is zero-filled.

    for (size_t size = alignment; size <= smallMax; size += alignment) {
        size_t sizeClass = bmalloc::sizeClass(size);
        auto& metadata = m_smallLineMetadata[sizeClass];

        size_t object = 0;
        size_t line = 0;
        while (object < vmPageSize) {
            line = object / SmallPage::lineSize;
            size_t leftover = object % SmallPage::lineSize;

            size_t objectCount;
            size_t remainder;
            divideRoundingUp(SmallPage::lineSize - leftover, size, objectCount, remainder);

            metadata[line] = { static_cast<unsigned short>(leftover), static_cast<unsigned short>(objectCount) };

            object += objectCount * size;
        }

        // Don't allow the last object in a page to escape the page.
        if (object > vmPageSize) {
            BASSERT(metadata[line].objectCount);
            --metadata[line].objectCount;
        }
    }
}

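// Scavenging returns free small pages and large objects to the VM heap on a
// background thread, pausing between steps whenever m_isAllocatingPages
// indicates that the application is actively allocating new memory.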
void Heap::concurrentScavenge()
{
    std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
    scavenge(lock, scavengeSleepDuration);
}

void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);

    scavengeSmallPages(lock, sleepDuration);
    scavengeLargeObjects(lock, sleepDuration);

    sleep(lock, sleepDuration);
}

void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (m_smallPages.size()) {
        m_vmHeap.deallocateSmallPage(lock, m_smallPages.pop());
        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
    }
}

void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (LargeObject largeObject = m_largeObjects.takeGreedy()) {
        m_vmHeap.deallocateLargeObject(lock, largeObject);
        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
    }
}

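// Refills a BumpAllocator (and its BumpRangeCache) from a small page: each run
// of contiguous free lines becomes one bump range. Lines are ref'd by the
// number of objects handed out from them, and the page is ref'd once per line.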
void Heap::allocateSmallBumpRanges(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpAllocator& allocator, BumpRangeCache& rangeCache)
{
    BASSERT(!rangeCache.size());
    SmallPage* page = allocateSmallPage(lock, sizeClass);
    SmallLine* lines = page->begin();
    BASSERT(page->hasFreeLines(lock));

    // Find a free line.
    for (size_t lineNumber = 0; lineNumber < SmallPage::lineCount; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
        if (!lineMetadata.objectCount)
            continue;

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            m_smallPagesWithFreeLines[sizeClass].push(page);
            return;
        }

        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock);

        // Merge with subsequent free lines.
        while (++lineNumber < SmallPage::lineCount) {
            if (lines[lineNumber].refCount(lock))
                break;

            LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
            if (!lineMetadata.objectCount)
                continue;

            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        if (!allocator.canAllocate())
            allocator.refill({ begin, objectCount });
        else
            rangeCache.push({ begin, objectCount });
    }

    page->setHasFreeLines(lock, false);
}

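// Prefers a partially free page already assigned to this size class, then a
// fully free cached page, and only then allocates a new page from the VM heap.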
SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    Vector<SmallPage*>& smallPagesWithFreeLines = m_smallPagesWithFreeLines[sizeClass];
    while (smallPagesWithFreeLines.size()) {
        SmallPage* page = smallPagesWithFreeLines.pop();
        if (!page->refCount(lock) || page->sizeClass() != sizeClass) // Page was promoted to the pages list.
            continue;
        return page;
    }

    SmallPage* page = [this, &lock]() {
        if (m_smallPages.size())
            return m_smallPages.pop();

        m_isAllocatingPages = true;
        SmallPage* page = m_vmHeap.allocateSmallPage(lock);
        return page;
    }();

    page->setSizeClass(sizeClass);
    return page;
}

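// Called when a small line's last object is freed. The first free line in a
// page puts the page back on its size class's free-line list; once the page's
// last line is freed, the page moves to m_smallPages and the scavenger runs.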
void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, SmallLine* line)
{
    BASSERT(!line->refCount(lock));
    SmallPage* page = SmallPage::get(line);
    page->deref(lock);

    if (!page->hasFreeLines(lock)) {
        page->setHasFreeLines(lock, true);
        m_smallPagesWithFreeLines[page->sizeClass()].push(page);

        BASSERT(page->refCount(lock));
        return;
    }

    if (page->refCount(lock))
        return;

    m_smallPages.push(page);
    m_scavenger.run();
}

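// XLarge objects bypass the large object free list entirely: they are mapped
// directly from the VM and tracked as raw Ranges in m_xLargeObjects.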
void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size)
{
    void* result = tryAllocateXLarge(lock, alignment, size);
    RELEASE_BASSERT(result);
    return result;
}

void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t size)
{
    return allocateXLarge(lock, superChunkSize, size);
}

void* Heap::tryAllocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size)
{
    BASSERT(isPowerOfTwo(alignment));
    BASSERT(alignment >= superChunkSize);
    BASSERT(size == roundUpToMultipleOf<xLargeAlignment>(size));

    void* result = tryVMAllocate(alignment, size);
    if (!result)
        return nullptr;
    m_xLargeObjects.push(Range(result, size));
    return result;
}

Range& Heap::findXLarge(std::unique_lock<StaticMutex>&, void* object)
{
    for (auto& range : m_xLargeObjects) {
        if (range.begin() != object)
            continue;
        return range;
    }

    RELEASE_BASSERT(false);
    return *static_cast<Range*>(nullptr); // Silence compiler error.
}

void Heap::deallocateXLarge(std::unique_lock<StaticMutex>& lock, void* object)
{
    Range toDeallocate = m_xLargeObjects.pop(&findXLarge(lock, object));

    lock.unlock();
    vmDeallocate(toDeallocate.begin(), toDeallocate.size());
    lock.lock();
}

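// Splits a free large object down to the requested size (and, in the aligned
// variant, splits off an unaligned prefix first), marks the allocated piece as
// in use, and returns any leftover pieces to the large object free list.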
inline LargeObject& Heap::splitAndAllocate(LargeObject& largeObject, size_t size)
{
    BASSERT(largeObject.isFree());

    LargeObject nextLargeObject;

    if (largeObject.size() - size > largeMin) {
        std::pair<LargeObject, LargeObject> split = largeObject.split(size);
        largeObject = split.first;
        nextLargeObject = split.second;
    }

    largeObject.setFree(false);

    if (nextLargeObject) {
        BASSERT(!nextLargeObject.nextCanMerge());
        m_largeObjects.insert(nextLargeObject);
    }

    return largeObject;
}

inline LargeObject& Heap::splitAndAllocate(LargeObject& largeObject, size_t alignment, size_t size)
{
    LargeObject prevLargeObject;
    LargeObject nextLargeObject;

    size_t alignmentMask = alignment - 1;
    if (test(largeObject.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, largeObject.begin() + largeMin) - largeObject.begin();
        std::pair<LargeObject, LargeObject> pair = largeObject.split(prefixSize);
        prevLargeObject = pair.first;
        largeObject = pair.second;
    }

    BASSERT(largeObject.isFree());

    if (largeObject.size() - size > largeMin) {
        std::pair<LargeObject, LargeObject> split = largeObject.split(size);
        largeObject = split.first;
        nextLargeObject = split.second;
    }

    largeObject.setFree(false);

    if (prevLargeObject) {
        LargeObject merged = prevLargeObject.merge();
        m_largeObjects.insert(merged);
    }

    if (nextLargeObject) {
        LargeObject merged = nextLargeObject.merge();
        m_largeObjects.insert(merged);
    }

    return largeObject;
}

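// Large allocations take a free object from m_largeObjects, falling back to
// the VM heap, commit physical pages if the object is only virtually reserved,
// and then split it down to the requested size and alignment.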
void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t size)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));

    LargeObject largeObject = m_largeObjects.take(size);
    if (!largeObject)
        largeObject = m_vmHeap.allocateLargeObject(size);

    if (largeObject.vmState().hasVirtual()) {
        m_isAllocatingPages = true;
        // We commit before we split in order to avoid split/merge commit/decommit churn.
        vmAllocatePhysicalPagesSloppy(largeObject.begin(), largeObject.size());
        largeObject.setVMState(VMState::Physical);
    }

    largeObject = splitAndAllocate(largeObject, size);

    return largeObject.begin();
}

void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size, size_t unalignedSize)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
    BASSERT(unalignedSize <= largeMax);
    BASSERT(unalignedSize >= largeMin);
    BASSERT(unalignedSize == roundUpToMultipleOf<largeAlignment>(unalignedSize));
    BASSERT(alignment <= largeChunkSize / 2);
    BASSERT(alignment >= largeAlignment);
    BASSERT(isPowerOfTwo(alignment));

    LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
    if (!largeObject)
        largeObject = m_vmHeap.allocateLargeObject(alignment, size, unalignedSize);

    if (largeObject.vmState().hasVirtual()) {
        m_isAllocatingPages = true;
        // We commit before we split in order to avoid split/merge commit/decommit churn.
        vmAllocatePhysicalPagesSloppy(largeObject.begin(), largeObject.size());
        largeObject.setVMState(VMState::Physical);
    }

    largeObject = splitAndAllocate(largeObject, alignment, size);

    return largeObject.begin();
}

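// Freed large objects are eagerly merged with free neighbors and put back on
// the free list; the scavenger later returns them to the VM heap.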
void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, const LargeObject& largeObject)
{
    BASSERT(!largeObject.isFree());
    largeObject.setFree(true);

    LargeObject merged = largeObject.merge();
    m_largeObjects.insert(merged);
    m_scavenger.run();
}

void Heap::deallocateLarge(std::lock_guard<StaticMutex>& lock, void* object)
{
    LargeObject largeObject(object);
    deallocateLarge(lock, largeObject);
}

} // namespace bmalloc