bmalloc: Don't use a whole page for metadata
Source/bmalloc/bmalloc/Heap.cpp
/*
 * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"
#include "BumpAllocator.h"
#include "LargeChunk.h"
#include "LargeObject.h"
#include "PerProcess.h"
#include "SmallChunk.h"
#include "SmallLine.h"
#include "SmallPage.h"
#include <thread>

namespace bmalloc {

Heap::Heap(std::lock_guard<StaticMutex>&)
    : m_largeObjects(VMState::HasPhysical::True)
    , m_isAllocatingPages(false)
    , m_scavenger(*this, &Heap::concurrentScavenge)
{
    initializeLineMetadata();
}

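// initializeLineMetadata() precomputes, for every small size class, where object
// boundaries fall within a page: for each line, the offset of the first object
// that begins in that line (startOffset) and how many objects begin there
// (objectCount).
//
// Illustrative worked example, assuming vmPageSize == 4096 and smallLineSize == 256
// (the actual constants are platform-dependent), for a 96-byte size class:
//
//     line 0:  { startOffset 0,  objectCount 3 }  // objects at 0, 96, 192
//     line 1:  { startOffset 32, objectCount 3 }  // objects at 288, 384, 480
//     line 2:  { startOffset 64, objectCount 2 }  // objects at 576, 672
//     ...the pattern then repeats every 3 lines (768 bytes)...
//     line 15: { startOffset 0,  objectCount 2 }  // trimmed from 3 by the
//                                                 // "don't escape the page" step,
//                                                 // so the last object ends at 4032 <= 4096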
46 void Heap::initializeLineMetadata()
47 {
48     // We assume that m_smallLineMetadata is zero-filled.
49
50     for (size_t size = alignment; size <= smallMax; size += alignment) {
51         size_t sizeClass = bmalloc::sizeClass(size);
52         auto& metadata = m_smallLineMetadata[sizeClass];
53
54         size_t object = 0;
55         size_t line = 0;
56         while (object < vmPageSize) {
57             line = object / smallLineSize;
58             size_t leftover = object % smallLineSize;
59
60             size_t objectCount;
61             size_t remainder;
62             divideRoundingUp(smallLineSize - leftover, size, objectCount, remainder);
63
64             metadata[line] = { static_cast<unsigned short>(leftover), static_cast<unsigned short>(objectCount) };
65
66             object += objectCount * size;
67         }
68
69         // Don't allow the last object in a page to escape the page.
70         if (object > vmPageSize) {
71             BASSERT(metadata[line].objectCount);
72             --metadata[line].objectCount;
73         }
74     }
75 }
76
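// Scavenging runs on a background thread (via m_scavenger) and hands freed small
// pages and large objects back to the VM heap so their physical memory can be
// decommitted. Each step re-checks m_isAllocatingPages; the intent is to back off
// while the heap is actively allocating, so memory that is about to be reused is
// not decommitted. (waitUntilFalse() and sleep() are defined elsewhere; this is a
// summary of the observed protocol, not a specification.)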
void Heap::concurrentScavenge()
{
    std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
    scavenge(lock, scavengeSleepDuration);
}

void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);

    scavengeSmallPages(lock, sleepDuration);
    scavengeLargeObjects(lock, sleepDuration);

    sleep(lock, sleepDuration);
}

void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (m_smallPages.size()) {
        m_vmHeap.deallocateSmallPage(lock, m_smallPages.pop());
        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
    }
}

void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (LargeObject largeObject = m_largeObjects.takeGreedy()) {
        m_vmHeap.deallocateLargeObject(lock, largeObject);
        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
    }
}

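// Builds bump-allocation ranges for 'sizeClass' out of the free lines of a small
// page: lines that are in use or hold no object boundaries are skipped, and each
// run of free lines is coalesced into one contiguous range. The first range
// refills the allocator directly; subsequent ranges go into the range cache. If
// the cache fills up on a fragmented page, the page is pushed back onto the
// "pages with free lines" list so its remaining ranges can be harvested later.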
void Heap::allocateSmallBumpRanges(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpAllocator& allocator, BumpRangeCache& rangeCache)
{
    BASSERT(!rangeCache.size());
    SmallPage* page = allocateSmallPage(lock, sizeClass);
    SmallLine* lines = page->begin();
    BASSERT(page->hasFreeLines(lock));

    // Find a free line.
    for (size_t lineNumber = 0; lineNumber < smallLineCount; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
        if (!lineMetadata.objectCount)
            continue;

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            m_smallPagesWithFreeLines[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock);

        // Merge with subsequent free lines.
        while (++lineNumber < smallLineCount) {
            if (lines[lineNumber].refCount(lock))
                break;

            LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
            if (!lineMetadata.objectCount)
                continue;

            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        if (!allocator.canAllocate())
            allocator.refill({ begin, objectCount });
        else
            rangeCache.push({ begin, objectCount });
    }

    BASSERT(allocator.canAllocate());
    page->setHasFreeLines(lock, false);
}

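// Small pages are sourced in order of preference: (1) a partially used page that
// already has free lines for this size class, (2) a completely free page from
// m_smallPages, (3) a fresh page from the VM heap. Only option (3) sets
// m_isAllocatingPages, which makes the scavenger back off.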
SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    Vector<SmallPage*>& smallPagesWithFreeLines = m_smallPagesWithFreeLines[sizeClass];
    while (smallPagesWithFreeLines.size()) {
        SmallPage* page = smallPagesWithFreeLines.pop();
        if (!page->refCount(lock) || page->sizeClass() != sizeClass) // Page was promoted to the free-page list, or reused for another size class.
            continue;
        return page;
    }

    SmallPage* page = [this, &lock]() {
        if (m_smallPages.size())
            return m_smallPages.pop();

        m_isAllocatingPages = true;
        SmallPage* page = m_vmHeap.allocateSmallPage(lock);
        return page;
    }();

    page->setSizeClass(sizeClass);
    return page;
}

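// Called when a small line's reference count reaches zero. Dropping the line also
// drops one reference on its page. A page that previously had no free lines is
// put back on the "pages with free lines" list; a page whose last referenced line
// was just freed goes onto m_smallPages, and the scavenger is kicked so the
// memory can eventually be returned to the OS.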
void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, SmallLine* line)
{
    BASSERT(!line->refCount(lock));
    SmallPage* page = SmallPage::get(line);
    page->deref(lock);

    if (!page->hasFreeLines(lock)) {
        page->setHasFreeLines(lock, true);
        m_smallPagesWithFreeLines[page->sizeClass()].push(page);

        BASSERT(page->refCount(lock));
        return;
    }

    if (page->refCount(lock))
        return;

    m_smallPages.push(page);
    m_scavenger.run();
}

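// XLarge allocations bypass the large-object free lists: each one is a dedicated
// VM allocation, tracked as a Range in m_xLargeObjects so that deallocateXLarge()
// can recover its size. Note the required alignment of at least superChunkSize
// (see the BASSERTs in tryAllocateXLarge()).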
void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size)
{
    void* result = tryAllocateXLarge(lock, alignment, size);
    RELEASE_BASSERT(result);
    return result;
}

void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t size)
{
    return allocateXLarge(lock, superChunkSize, size);
}

void* Heap::tryAllocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size)
{
    BASSERT(isPowerOfTwo(alignment));
    BASSERT(alignment >= superChunkSize);
    BASSERT(size == roundUpToMultipleOf<xLargeAlignment>(size));

    void* result = tryVMAllocate(alignment, size);
    if (!result)
        return nullptr;
    m_xLargeObjects.push(Range(result, size));
    return result;
}

Range& Heap::findXLarge(std::unique_lock<StaticMutex>&, void* object)
{
    for (auto& range : m_xLargeObjects) {
        if (range.begin() != object)
            continue;
        return range;
    }

    RELEASE_BASSERT(false);
    return *static_cast<Range*>(nullptr); // Silence compiler error.
}

void Heap::deallocateXLarge(std::unique_lock<StaticMutex>& lock, void* object)
{
    Range toDeallocate = m_xLargeObjects.pop(&findXLarge(lock, object));

    lock.unlock();
    vmDeallocate(toDeallocate.begin(), toDeallocate.size());
    lock.lock();
}

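// The splitAndAllocate() helpers carve an allocation out of a free large object.
// The plain overload splits off a tail remainder (when the leftover is large
// enough to stand as a free object) and re-inserts it into the free list. The
// aligned overload may additionally split off a prefix so that the allocated
// object starts on an 'alignment' boundary; the prefix is at least largeMin bytes
// so it remains a valid free object, and both prefix and tail are merged with
// their free neighbors before being re-inserted.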
inline LargeObject& Heap::splitAndAllocate(LargeObject& largeObject, size_t size)
{
    BASSERT(largeObject.isFree());

    LargeObject nextLargeObject;

    if (largeObject.size() - size > largeMin) {
        std::pair<LargeObject, LargeObject> split = largeObject.split(size);
        largeObject = split.first;
        nextLargeObject = split.second;
    }

    largeObject.setFree(false);

    if (nextLargeObject) {
        BASSERT(!nextLargeObject.nextCanMerge());
        m_largeObjects.insert(nextLargeObject);
    }

    return largeObject;
}

inline LargeObject& Heap::splitAndAllocate(LargeObject& largeObject, size_t alignment, size_t size)
{
    LargeObject prevLargeObject;
    LargeObject nextLargeObject;

    size_t alignmentMask = alignment - 1;
    if (test(largeObject.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, largeObject.begin() + largeMin) - largeObject.begin();
        std::pair<LargeObject, LargeObject> pair = largeObject.split(prefixSize);
        prevLargeObject = pair.first;
        largeObject = pair.second;
    }

    BASSERT(largeObject.isFree());

    if (largeObject.size() - size > largeMin) {
        std::pair<LargeObject, LargeObject> split = largeObject.split(size);
        largeObject = split.first;
        nextLargeObject = split.second;
    }

    largeObject.setFree(false);

    if (prevLargeObject) {
        LargeObject merged = prevLargeObject.merge();
        m_largeObjects.insert(merged);
    }

    if (nextLargeObject) {
        LargeObject merged = nextLargeObject.merge();
        m_largeObjects.insert(merged);
    }

    return largeObject;
}

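// Large allocations come from the in-process free list when possible, and from
// the VM heap otherwise. Objects handed back by the VM heap may be partly or
// wholly decommitted (vmState().hasVirtual()); they are committed in full before
// splitting, so later split/merge operations never commit or decommit piecemeal.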
void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t size)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));

    LargeObject largeObject = m_largeObjects.take(size);
    if (!largeObject)
        largeObject = m_vmHeap.allocateLargeObject(lock, size);

    if (largeObject.vmState().hasVirtual()) {
        m_isAllocatingPages = true;
        // We commit before we split in order to avoid split/merge commit/decommit churn.
        vmAllocatePhysicalPagesSloppy(largeObject.begin(), largeObject.size());
        largeObject.setVMState(VMState::Physical);
    }

    largeObject = splitAndAllocate(largeObject, size);

    return largeObject.begin();
}

void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, size_t unalignedSize)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
    BASSERT(unalignedSize <= largeMax);
    BASSERT(unalignedSize >= largeMin);
    BASSERT(unalignedSize == roundUpToMultipleOf<largeAlignment>(unalignedSize));
    BASSERT(alignment <= largeChunkSize / 2);
    BASSERT(alignment >= largeAlignment);
    BASSERT(isPowerOfTwo(alignment));

    LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
    if (!largeObject)
        largeObject = m_vmHeap.allocateLargeObject(lock, alignment, size, unalignedSize);

    if (largeObject.vmState().hasVirtual()) {
        m_isAllocatingPages = true;
        // We commit before we split in order to avoid split/merge commit/decommit churn.
        vmAllocatePhysicalPagesSloppy(largeObject.begin(), largeObject.size());
        largeObject.setVMState(VMState::Physical);
    }

    largeObject = splitAndAllocate(largeObject, alignment, size);

    return largeObject.begin();
}

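// Freed large objects are merged with any adjacent free neighbors before going
// back on the free list, which limits fragmentation; the scavenger is then asked
// to run so idle memory can eventually be decommitted.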
void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, const LargeObject& largeObject)
{
    BASSERT(!largeObject.isFree());
    largeObject.setFree(true);

    LargeObject merged = largeObject.merge();
    m_largeObjects.insert(merged);
    m_scavenger.run();
}

void Heap::deallocateLarge(std::lock_guard<StaticMutex>& lock, void* object)
{
    LargeObject largeObject(object);
    deallocateLarge(lock, largeObject);
}

} // namespace bmalloc