Source/bmalloc/bmalloc/Heap.cpp
/*
 * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"
#include "LargeChunk.h"
#include "LargeObject.h"
#include "Line.h"
#include "MediumChunk.h"
#include "Page.h"
#include "PerProcess.h"
#include "SmallChunk.h"
#include <thread>

namespace bmalloc {

Heap::Heap(std::lock_guard<StaticMutex>&)
    : m_largeObjects(Owner::Heap)
    , m_isAllocatingPages(false)
    , m_scavenger(*this, &Heap::concurrentScavenge)
{
    initializeLineMetadata();
}

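// Precomputes per-size-class line metadata: for each line in a page, the offset at which
// the first object begins (skipping any object that spilled over from the previous line)
// and the number of objects that begin on that line. The last object on a line may itself
// spill into the next line, except on the final line of the page, which rounds down.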
void Heap::initializeLineMetadata()
{
    for (unsigned short size = alignment; size <= smallMax; size += alignment) {
        unsigned short startOffset = 0;
        for (size_t lineNumber = 0; lineNumber < SmallPage::lineCount - 1; ++lineNumber) {
            unsigned short objectCount;
            unsigned short remainder;
            divideRoundingUp(static_cast<unsigned short>(SmallPage::lineSize - startOffset), size, objectCount, remainder);
            BASSERT(objectCount);
            m_smallLineMetadata[sizeClass(size)][lineNumber] = { startOffset, objectCount };
            startOffset = remainder ? size - remainder : 0;
        }

        // The last line in the page rounds down instead of up because it's not allowed to overlap into its neighbor.
        unsigned short objectCount = static_cast<unsigned short>((SmallPage::lineSize - startOffset) / size);
        m_smallLineMetadata[sizeClass(size)][SmallPage::lineCount - 1] = { startOffset, objectCount };
    }

    for (unsigned short size = smallMax + alignment; size <= mediumMax; size += alignment) {
        unsigned short startOffset = 0;
        for (size_t lineNumber = 0; lineNumber < MediumPage::lineCount - 1; ++lineNumber) {
            unsigned short objectCount;
            unsigned short remainder;
            divideRoundingUp(static_cast<unsigned short>(MediumPage::lineSize - startOffset), size, objectCount, remainder);
            BASSERT(objectCount);
            m_mediumLineMetadata[sizeClass(size)][lineNumber] = { startOffset, objectCount };
            startOffset = remainder ? size - remainder : 0;
        }

        // The last line in the page rounds down instead of up because it's not allowed to overlap into its neighbor.
        unsigned short objectCount = static_cast<unsigned short>((MediumPage::lineSize - startOffset) / size);
        m_mediumLineMetadata[sizeClass(size)][MediumPage::lineCount - 1] = { startOffset, objectCount };
    }
}

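// Entry point for the scavenger task (m_scavenger). Takes the per-process Heap lock and
// returns free pages and large objects to the VM heap, sleeping between passes.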
void Heap::concurrentScavenge()
{
    std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
    scavenge(lock, scavengeSleepDuration);
}

void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);

    scavengeSmallPages(lock, sleepDuration);
    scavengeMediumPages(lock, sleepDuration);
    scavengeLargeObjects(lock, sleepDuration);

    sleep(lock, sleepDuration);
}

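// Each scavenge helper returns one free page (or large object) to the VM heap at a time,
// pausing via waitUntilFalse() whenever an allocator is actively requesting pages so that
// scavenging yields to allocation.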
void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (m_smallPages.size()) {
        m_vmHeap.deallocateSmallPage(lock, m_smallPages.pop());
        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
    }
}

void Heap::scavengeMediumPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (m_mediumPages.size()) {
        m_vmHeap.deallocateMediumPage(lock, m_mediumPages.pop());
        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
    }
}

void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (LargeObject largeObject = m_largeObjects.takeGreedy()) {
        m_vmHeap.deallocateLargeObject(lock, largeObject);
        waitUntilFalse(lock, sleepDuration, m_isAllocatingPages);
    }
}

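// Refills an allocator's bump range cache from a small page: scans the page for runs of
// free lines, refs each line for the objects that will be carved out of it, and pushes one
// contiguous bump range per run.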
void Heap::refillSmallBumpRangeCache(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpRangeCache& rangeCache)
{
    BASSERT(!rangeCache.size());
    SmallPage* page = allocateSmallPage(lock, sizeClass);
    SmallLine* lines = page->begin();

    // Due to overlap from the previous line, the last line in the page may not be able to fit any objects.
    size_t end = SmallPage::lineCount;
    if (!m_smallLineMetadata[sizeClass][SmallPage::lineCount - 1].objectCount)
        --end;

    // Find a free line.
    for (size_t lineNumber = 0; lineNumber < end; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock);

        // Merge with subsequent free lines.
        while (++lineNumber < end) {
            if (lines[lineNumber].refCount(lock))
                break;

            LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        rangeCache.push({ begin, objectCount });
    }
}

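// Medium-page analogue of refillSmallBumpRangeCache().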
void Heap::refillMediumBumpRangeCache(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpRangeCache& rangeCache)
{
    MediumPage* page = allocateMediumPage(lock, sizeClass);
    BASSERT(!rangeCache.size());
    MediumLine* lines = page->begin();

    // Due to overlap from the previous line, the last line in the page may not be able to fit any objects.
    size_t end = MediumPage::lineCount;
    if (!m_mediumLineMetadata[sizeClass][MediumPage::lineCount - 1].objectCount)
        --end;

    // Find a free line.
    for (size_t lineNumber = 0; lineNumber < end; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock);

        // Merge with subsequent free lines.
        while (++lineNumber < end) {
            if (lines[lineNumber].refCount(lock))
                break;

            LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        rangeCache.push({ begin, objectCount });
    }
}

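// Prefers a page that already has free lines for this size class. Entries in
// m_smallPagesWithFreeLines can be stale (the page may have become entirely free or been
// reused for another size class), so such entries are skipped. Failing that, takes a fully
// free page, allocating one from the VM heap if necessary.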
SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    Vector<SmallPage*>& smallPagesWithFreeLines = m_smallPagesWithFreeLines[sizeClass];
    while (smallPagesWithFreeLines.size()) {
        SmallPage* page = smallPagesWithFreeLines.pop();
        if (!page->refCount(lock) || page->sizeClass() != sizeClass) // Page was promoted to the pages list.
            continue;
        return page;
    }

    SmallPage* page = [this, sizeClass]() {
        if (m_smallPages.size())
            return m_smallPages.pop();

        m_isAllocatingPages = true;
        return m_vmHeap.allocateSmallPage();
    }();

    page->setSizeClass(sizeClass);
    return page;
}

MediumPage* Heap::allocateMediumPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    Vector<MediumPage*>& mediumPagesWithFreeLines = m_mediumPagesWithFreeLines[sizeClass];
    while (mediumPagesWithFreeLines.size()) {
        MediumPage* page = mediumPagesWithFreeLines.pop();
        if (!page->refCount(lock) || page->sizeClass() != sizeClass) // Page was promoted to the pages list.
            continue;
        return page;
    }

    MediumPage* page = [this, sizeClass]() {
        if (m_mediumPages.size())
            return m_mediumPages.pop();

        m_isAllocatingPages = true;
        return m_vmHeap.allocateMediumPage();
    }();

    page->setSizeClass(sizeClass);
    return page;
}

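// Called when a small line's refcount drops to zero. Derefs the page and uses the page's
// previous refcount to detect the interesting transitions: the first line freed makes the
// page eligible for refill (m_smallPagesWithFreeLines), and the last line freed makes the
// whole page free (m_smallPages) and schedules the scavenger.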
void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, SmallLine* line)
{
    BASSERT(!line->refCount(lock));
    SmallPage* page = SmallPage::get(line);
    size_t refCount = page->refCount(lock);
    page->deref(lock);

    switch (refCount) {
    case SmallPage::lineCount: {
        // First free line in the page.
        m_smallPagesWithFreeLines[page->sizeClass()].push(page);
        break;
    }
    case 1: {
        // Last free line in the page.
        m_smallPages.push(page);
        m_scavenger.run();
        break;
    }
    }
}

void Heap::deallocateMediumLine(std::lock_guard<StaticMutex>& lock, MediumLine* line)
{
    BASSERT(!line->refCount(lock));
    MediumPage* page = MediumPage::get(line);
    size_t refCount = page->refCount(lock);
    page->deref(lock);

    switch (refCount) {
    case MediumPage::lineCount: {
        // First free line in the page.
        m_mediumPagesWithFreeLines[page->sizeClass()].push(page);
        break;
    }
    case 1: {
        // Last free line in the page.
        m_mediumPages.push(page);
        m_scavenger.run();
        break;
    }
    }
}

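// XLarge allocations bypass the large object heap entirely: each one is vm-allocated
// directly and tracked as a Range in m_xLargeObjects.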
void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size)
{
    void* result = tryAllocateXLarge(lock, alignment, size);
    RELEASE_BASSERT(result);
    return result;
}

void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t size)
{
    return allocateXLarge(lock, superChunkSize, size);
}

void* Heap::tryAllocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size)
{
    BASSERT(isPowerOfTwo(alignment));
    BASSERT(alignment >= superChunkSize);
    BASSERT(size == roundUpToMultipleOf<xLargeAlignment>(size));

    void* result = tryVMAllocate(alignment, size);
    if (!result)
        return nullptr;
    m_xLargeObjects.push(Range(result, size));
    return result;
}

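// Linear search over the tracked XLarge ranges. Passing a pointer that was never returned
// by allocateXLarge() fails the RELEASE_BASSERT below.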
Range& Heap::findXLarge(std::unique_lock<StaticMutex>&, void* object)
{
    for (auto& range : m_xLargeObjects) {
        if (range.begin() != object)
            continue;
        return range;
    }

    RELEASE_BASSERT(false);
    return *static_cast<Range*>(nullptr); // Silence compiler error.
}

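// Removes the range from the tracking vector under the lock, then drops the lock around
// the vmDeallocate() call since the range is no longer reachable from the heap.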
void Heap::deallocateXLarge(std::unique_lock<StaticMutex>& lock, void* object)
{
    Range toDeallocate = m_xLargeObjects.pop(&findXLarge(lock, object));

    lock.unlock();
    vmDeallocate(toDeallocate.begin(), toDeallocate.size());
    lock.lock();
}

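// Carves an allocation out of a free large object: if the leftover tail is big enough to
// stand on its own, splits it off and returns it to the free list; then marks the object
// allocated.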
void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, LargeObject& largeObject, size_t size)
{
    BASSERT(largeObject.isFree());

    if (largeObject.size() - size > largeMin) {
        std::pair<LargeObject, LargeObject> split = largeObject.split(size);
        largeObject = split.first;
        m_largeObjects.insert(split.second);
    }

    largeObject.setFree(false);
    return largeObject.begin();
}

void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t size)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));

    LargeObject largeObject = m_largeObjects.take(size);
    if (!largeObject) {
        m_isAllocatingPages = true;
        largeObject = m_vmHeap.allocateLargeObject(size);
    }

    return allocateLarge(lock, largeObject, size);
}

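// Aligned variant: takes a free object large enough (unalignedSize) to guarantee that an
// aligned, size-byte sub-range exists, then splits off an unaligned prefix so that the
// returned object begins on the requested alignment boundary.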
void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, size_t unalignedSize)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
    BASSERT(unalignedSize <= largeMax);
    BASSERT(unalignedSize >= largeMin);
    BASSERT(unalignedSize == roundUpToMultipleOf<largeAlignment>(unalignedSize));
    BASSERT(alignment <= largeChunkSize / 2);
    BASSERT(alignment >= largeAlignment);
    BASSERT(isPowerOfTwo(alignment));

    LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
    if (!largeObject) {
        m_isAllocatingPages = true;
        largeObject = m_vmHeap.allocateLargeObject(alignment, size, unalignedSize);
    }

    size_t alignmentMask = alignment - 1;
    if (test(largeObject.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, largeObject.begin() + largeMin) - largeObject.begin();
        std::pair<LargeObject, LargeObject> pair = largeObject.split(prefixSize);
        m_largeObjects.insert(pair.first);
        largeObject = pair.second;
    }

    return allocateLarge(lock, largeObject, size);
}

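// Marks the object free, merges it with any free neighbors, reinserts the merged object
// into the free list, and schedules the scavenger.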
void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, const LargeObject& largeObject)
{
    BASSERT(!largeObject.isFree());
    largeObject.setFree(true);

    LargeObject merged = largeObject.merge();
    m_largeObjects.insert(merged);
    m_scavenger.run();
}

void Heap::deallocateLarge(std::lock_guard<StaticMutex>& lock, void* object)
{
    LargeObject largeObject(object);
    deallocateLarge(lock, largeObject);
}

} // namespace bmalloc