bmalloc: Renamed LargeChunk => Chunk
Source/bmalloc/bmalloc/Heap.cpp
/*
 * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"
#include "BumpAllocator.h"
#include "Chunk.h"
#include "LargeObject.h"
#include "PerProcess.h"
#include "SmallLine.h"
#include "SmallPage.h"
#include <thread>

namespace bmalloc {

Heap::Heap(std::lock_guard<StaticMutex>&)
    : m_largeObjects(VMState::HasPhysical::True)
    , m_isAllocatingPages(false)
    , m_scavenger(*this, &Heap::concurrentScavenge)
{
    initializeLineMetadata();
}

void Heap::initializeLineMetadata()
{
    // We assume that m_smallLineMetadata is zero-filled.
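    //
    // Illustration (the exact constants live in Sizes.h; assume here that
    // smallLineSize is 256 bytes): for a hypothetical 96-byte size class,
    // line 0 gets { startOffset: 0, objectCount: 3 }, since objects begin at
    // offsets 0, 96 and 192 and the third one straddles into line 1. Line 1
    // then gets { startOffset: 32, objectCount: 3 }: the straddling object
    // occupies its first 32 bytes, and new objects begin at 288, 384 and 480.
    // The check below the while loop trims the last line's objectCount when
    // the final object would otherwise spill past the end of the page.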

    for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
        size_t size = objectSize(sizeClass);
        auto& metadata = m_smallLineMetadata[sizeClass];

        size_t object = 0;
        size_t line = 0;
        while (object < vmPageSize) {
            line = object / smallLineSize;
            size_t leftover = object % smallLineSize;

            size_t objectCount;
            size_t remainder;
            divideRoundingUp(smallLineSize - leftover, size, objectCount, remainder);

            metadata[line] = { static_cast<unsigned short>(leftover), static_cast<unsigned short>(objectCount) };

            object += objectCount * size;
        }

        // Don't allow the last object in a page to escape the page.
        if (object > vmPageSize) {
            BASSERT(metadata[line].objectCount);
            --metadata[line].objectCount;
        }
    }
}

void Heap::concurrentScavenge()
{
    std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
    scavenge(lock, scavengeSleepDuration);
}

void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
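    // The scavenger backs off while the application is actively allocating:
    // m_isAllocatingPages is set on the allocation paths below, and
    // waitUntilFalse() delays scavenging until that flag clears.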
    waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);

    lock.unlock();
    {
        std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
        scavengeSmallPages(lock);
    }
    lock.lock();

    scavengeLargeObjects(lock, sleepDuration);
    scavengeXLargeObjects(lock, sleepDuration);

    sleep(lock, sleepDuration);
}

void Heap::scavengeSmallPage(std::lock_guard<StaticMutex>& lock)
{
    SmallPage* page = m_smallPages.pop();

    // Transform small object page back into a large object.
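    // This is the inverse of allocateSmallPage(): once a small page has no
    // live lines left, it rejoins the large free list, from which
    // scavengeLargeObjects() can eventually return it to the OS.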
    page->setObjectType(ObjectType::Large);

    LargeObject largeObject(page->begin()->begin());
    deallocateLarge(lock, largeObject);
}

void Heap::scavengeSmallPages(std::lock_guard<StaticMutex>& lock)
{
    while (!m_smallPages.isEmpty())
        scavengeSmallPage(lock);
}

void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (LargeObject largeObject = m_largeObjects.takeGreedy()) {
        m_vmHeap.deallocateLargeObject(lock, largeObject);
        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
    }
}

void Heap::scavengeXLargeObjects(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (XLargeRange range = m_xLargeMap.takePhysical()) {
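        // Decommit the physical pages outside the lock, then re-record the
        // range as virtual-only so a later allocation knows it must commit
        // physical pages before handing the range out again.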
        lock.unlock();
        vmDeallocatePhysicalPagesSloppy(range.begin(), range.size());
        lock.lock();

        range.setVMState(VMState::Virtual);
        m_xLargeMap.addVirtual(range);

        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
    }

    m_xLargeMap.shrinkToFit();
}

void Heap::allocateSmallBumpRanges(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpAllocator& allocator, BumpRangeCache& rangeCache)
{
    BASSERT(!rangeCache.size());
    SmallPage* page = allocateSmallPage(lock, sizeClass);
    SmallLine* lines = page->begin();
    BASSERT(page->hasFreeLines(lock));

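    // Each bump range handed out below covers a run of contiguous free lines:
    // we start a range at the first free line we find and extend it across
    // subsequent free lines, so the BumpAllocator can carve objects out of it
    // with simple pointer bumps. The first range refills the allocator
    // directly; any additional ranges go into the per-allocator range cache.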
    // Find a free line.
    for (size_t lineNumber = 0; lineNumber < smallLineCount; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
        if (!lineMetadata.objectCount)
            continue;

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            m_smallPagesWithFreeLines[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock);

        // Merge with subsequent free lines.
        while (++lineNumber < smallLineCount) {
            if (lines[lineNumber].refCount(lock))
                break;

            LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
            if (!lineMetadata.objectCount)
                continue;

            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        if (!allocator.canAllocate())
            allocator.refill({ begin, objectCount });
        else
            rangeCache.push({ begin, objectCount });
    }

    BASSERT(allocator.canAllocate());
    page->setHasFreeLines(lock, false);
}

SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    if (!m_smallPagesWithFreeLines[sizeClass].isEmpty())
        return m_smallPagesWithFreeLines[sizeClass].popFront();

    if (!m_smallPages.isEmpty()) {
        SmallPage* page = m_smallPages.pop();
        page->setSizeClass(sizeClass);
        return page;
    }

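    // Request enough unaligned space that a vmPageSize-aligned, vmPageSize-sized
    // object is guaranteed to fit: the aligned splitAndAllocate() below may carve
    // off a prefix of up to largeMin + vmPageSize - largeAlignment bytes before
    // the aligned portion begins.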
    size_t unalignedSize = largeMin + vmPageSize - largeAlignment + vmPageSize;
    LargeObject largeObject = allocateLarge(lock, vmPageSize, vmPageSize, unalignedSize);

    // Transform our large object into a small object page. We deref here
    // because our small objects will keep their own refcounts on the line.
    Object object(largeObject.begin());
    object.line()->deref(lock);
    object.page()->setObjectType(ObjectType::Small);

    object.page()->setSizeClass(sizeClass);
    return object.page();
}

void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, Object object)
{
    BASSERT(!object.line()->refCount(lock));
    SmallPage* page = object.page();
    if (page->objectType() == ObjectType::Large)
        return deallocateLarge(lock, LargeObject(object.begin()));

    page->deref(lock);
    if (!page->hasFreeLines(lock)) {
        page->setHasFreeLines(lock, true);
        m_smallPagesWithFreeLines[page->sizeClass()].push(page);

        BASSERT(page->refCount(lock));
        return;
    }

    if (page->refCount(lock))
        return;

    m_smallPagesWithFreeLines[page->sizeClass()].remove(page);
    m_smallPages.push(page);
    m_scavenger.run();
}

inline LargeObject& Heap::splitAndAllocate(std::lock_guard<StaticMutex>& lock, LargeObject& largeObject, size_t size)
{
    BASSERT(largeObject.isFree());

    LargeObject nextLargeObject;

    if (largeObject.size() - size >= largeMin) {
        std::pair<LargeObject, LargeObject> split = largeObject.split(size);
        largeObject = split.first;
        nextLargeObject = split.second;
    }

    largeObject.setFree(false);
    Object object(largeObject.begin());
    object.line()->ref(lock);
    BASSERT(object.page()->objectType() == ObjectType::Large);

    if (nextLargeObject) {
        BASSERT(!nextLargeObject.nextCanMerge());
        m_largeObjects.insert(nextLargeObject);
    }

    return largeObject;
}

inline LargeObject& Heap::splitAndAllocate(std::lock_guard<StaticMutex>& lock, LargeObject& largeObject, size_t alignment, size_t size)
{
    LargeObject prevLargeObject;
    LargeObject nextLargeObject;

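    // If the object isn't already aligned, split off a prefix so that the
    // remainder starts on an alignment boundary. The prefix must itself be a
    // valid free large object, which is why we round up from begin() + largeMin
    // rather than from begin().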
    size_t alignmentMask = alignment - 1;
    if (test(largeObject.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, largeObject.begin() + largeMin) - largeObject.begin();
        std::pair<LargeObject, LargeObject> pair = largeObject.split(prefixSize);
        prevLargeObject = pair.first;
        largeObject = pair.second;
    }

    BASSERT(largeObject.isFree());

    if (largeObject.size() - size >= largeMin) {
        std::pair<LargeObject, LargeObject> split = largeObject.split(size);
        largeObject = split.first;
        nextLargeObject = split.second;
    }

    largeObject.setFree(false);
    Object object(largeObject.begin());
    object.line()->ref(lock);
    BASSERT(object.page()->objectType() == ObjectType::Large);

    if (prevLargeObject) {
        LargeObject merged = prevLargeObject.merge();
        m_largeObjects.insert(merged);
    }

    if (nextLargeObject) {
        LargeObject merged = nextLargeObject.merge();
        m_largeObjects.insert(merged);
    }

    return largeObject;
}

void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t size)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));

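    // A request of at most one page may be satisfiable by a scavenged small
    // page, since scavengeSmallPages() returns empty small pages to the large
    // free list as page-sized large objects.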
    if (size <= vmPageSize)
        scavengeSmallPages(lock);

    LargeObject largeObject = m_largeObjects.take(size);
    if (!largeObject)
        largeObject = m_vmHeap.allocateLargeObject(lock, size);

    if (largeObject.vmState().hasVirtual()) {
        m_isAllocatingPages = true;
        // We commit before we split in order to avoid split/merge commit/decommit churn.
        vmAllocatePhysicalPagesSloppy(largeObject.begin(), largeObject.size());
        largeObject.setVMState(VMState::Physical);
    }

    largeObject = splitAndAllocate(lock, largeObject, size);

    return largeObject.begin();
}

void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, size_t unalignedSize)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
    BASSERT(unalignedSize <= largeMax);
    BASSERT(unalignedSize >= largeMin);
    BASSERT(unalignedSize == roundUpToMultipleOf<largeAlignment>(unalignedSize));
    BASSERT(alignment <= chunkSize / 2);
    BASSERT(alignment >= largeAlignment);
    BASSERT(isPowerOfTwo(alignment));

    if (size <= vmPageSize)
        scavengeSmallPages(lock);

    LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
    if (!largeObject)
        largeObject = m_vmHeap.allocateLargeObject(lock, alignment, size, unalignedSize);

    if (largeObject.vmState().hasVirtual()) {
        m_isAllocatingPages = true;
        // We commit before we split in order to avoid split/merge commit/decommit churn.
        vmAllocatePhysicalPagesSloppy(largeObject.begin(), largeObject.size());
        largeObject.setVMState(VMState::Physical);
    }

    largeObject = splitAndAllocate(lock, largeObject, alignment, size);

    return largeObject.begin();
}

void Heap::shrinkLarge(std::lock_guard<StaticMutex>& lock, LargeObject& largeObject, size_t newSize)
{
    std::pair<LargeObject, LargeObject> split = largeObject.split(newSize);
    deallocateLarge(lock, split.second);
}

void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, const LargeObject& largeObject)
{
    BASSERT(!largeObject.isFree());
    BASSERT(Object(largeObject.begin()).page()->objectType() == ObjectType::Large);
    largeObject.setFree(true);

    LargeObject merged = largeObject.merge();
    m_largeObjects.insert(merged);
    m_scavenger.run();
}

void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size)
{
    void* result = tryAllocateXLarge(lock, alignment, size);
    RELEASE_BASSERT(result);
    return result;
}

void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t size)
{
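    // Note: 'alignment' here is not a local variable or parameter; it names
    // bmalloc's default minimum alignment constant (defined in Sizes.h).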
    return allocateXLarge(lock, alignment, size);
}

XLargeRange Heap::splitAndAllocate(XLargeRange& range, size_t alignment, size_t size)
{
    XLargeRange prev;
    XLargeRange next;

    size_t alignmentMask = alignment - 1;
    if (test(range.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, range.begin()) - range.begin();
        std::pair<XLargeRange, XLargeRange> pair = range.split(prefixSize);
        prev = pair.first;
        range = pair.second;
    }

    if (range.size() - size >= xLargeAlignment) {
        size_t alignedSize = roundUpToMultipleOf<xLargeAlignment>(size);
        std::pair<XLargeRange, XLargeRange> pair = range.split(alignedSize);
        range = pair.first;
        next = pair.second;
    }

    // At this point our range might contain an unused tail fragment. This is
    // common. We can't allocate the tail fragment because it's aligned to less
    // than xLargeAlignment. So, we pair the allocation with its tail fragment
    // in the allocated list. This is an important optimization because it
    // keeps the free list short, speeding up allocation and merging.
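    //
    // For example (sizes purely illustrative): if xLargeAlignment were 64kB and
    // the caller asked for 80kB, the split above would carve off a 128kB aligned
    // piece, and the 48kB between the page-rounded object and the 128kB boundary
    // would stay attached to the allocation as its tail fragment instead of
    // going back on the free list.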

    std::pair<XLargeRange, XLargeRange> allocated = range.split(roundUpToMultipleOf<vmPageSize>(size));
    if (allocated.first.vmState().hasVirtual()) {
        vmAllocatePhysicalPagesSloppy(allocated.first.begin(), allocated.first.size());
        allocated.first.setVMState(VMState::Physical);
    }

    m_xLargeMap.addAllocated(prev, allocated, next);
    return allocated.first;
}

void* Heap::tryAllocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size)
{
    BASSERT(isPowerOfTwo(alignment));
    BASSERT(alignment < xLargeMax);

    m_isAllocatingPages = true;

    alignment = roundUpToMultipleOf<xLargeAlignment>(alignment);

    XLargeRange range = m_xLargeMap.takeFree(alignment, size);
    if (!range) {
        // We allocate VM in aligned multiples to increase the chances that
        // the OS will provide contiguous ranges that we can merge.
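        // For example (purely illustrative), two back-to-back reservations of
        // one aligned unit each can later merge into a single two-unit free
        // range if the OS happens to place them adjacently.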
        size_t alignedSize = roundUpToMultipleOf<xLargeAlignment>(size);

        void* begin = tryVMAllocate(alignment, alignedSize);
        if (!begin)
            return nullptr;
        range = XLargeRange(begin, alignedSize, VMState::Virtual);
    }

    return splitAndAllocate(range, alignment, size).begin();
}

size_t Heap::xLargeSize(std::unique_lock<StaticMutex>&, void* object)
{
    return m_xLargeMap.getAllocated(object).size();
}

void Heap::shrinkXLarge(std::unique_lock<StaticMutex>&, const Range& object, size_t newSize)
{
    BASSERT(object.size() > newSize);

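    // Shrinking by less than a page can't release any physical memory, so
    // don't bother re-splitting the range.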
    if (object.size() - newSize < vmPageSize)
        return;

    XLargeRange range = m_xLargeMap.takeAllocated(object.begin());
    splitAndAllocate(range, xLargeAlignment, newSize);

    m_scavenger.run();
}

void Heap::deallocateXLarge(std::unique_lock<StaticMutex>&, void* object)
{
    XLargeRange range = m_xLargeMap.takeAllocated(object);
    m_xLargeMap.addFree(range);

    m_scavenger.run();
}

} // namespace bmalloc