bmalloc: Pathological madvise churn on the free(malloc(x)) benchmark
Source/bmalloc/bmalloc/Heap.cpp
/*
 * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"
#include "LargeChunk.h"
#include "LargeObject.h"
#include "Line.h"
#include "MediumChunk.h"
#include "Page.h"
#include "PerProcess.h"
#include "SmallChunk.h"
#include <thread>

namespace bmalloc {

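// Sleeps for the given duration with the heap lock released, so allocator
// threads aren't blocked while the scavenger waits. A zero duration is a no-op.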
static inline void sleep(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds duration)
{
    if (duration == std::chrono::milliseconds(0))
        return;

    lock.unlock();
    std::this_thread::sleep_for(duration);
    lock.lock();
}

Heap::Heap(std::lock_guard<StaticMutex>&)
    : m_largeObjects(Owner::Heap)
    , m_isAllocatingPages(false)
    , m_scavenger(*this, &Heap::concurrentScavenge)
{
    initializeLineMetadata();
}

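// Precomputes, for every small and medium size class, the first-object offset
// and object count of each line in a page. Objects may straddle line
// boundaries, so later lines can begin at a nonzero offset, and the last line
// may hold no objects at all.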
void Heap::initializeLineMetadata()
{
    for (unsigned short size = alignment; size <= smallMax; size += alignment) {
        unsigned short startOffset = 0;
        for (size_t lineNumber = 0; lineNumber < SmallPage::lineCount - 1; ++lineNumber) {
            unsigned short objectCount;
            unsigned short remainder;
            divideRoundingUp(static_cast<unsigned short>(SmallPage::lineSize - startOffset), size, objectCount, remainder);
            BASSERT(objectCount);
            m_smallLineMetadata[sizeClass(size)][lineNumber] = { startOffset, objectCount };
            startOffset = remainder ? size - remainder : 0;
        }

        // The last line in the page rounds down instead of up because it's not allowed to overlap into its neighbor.
        unsigned short objectCount = static_cast<unsigned short>((SmallPage::lineSize - startOffset) / size);
        m_smallLineMetadata[sizeClass(size)][SmallPage::lineCount - 1] = { startOffset, objectCount };
    }

    for (unsigned short size = smallMax + alignment; size <= mediumMax; size += alignment) {
        unsigned short startOffset = 0;
        for (size_t lineNumber = 0; lineNumber < MediumPage::lineCount - 1; ++lineNumber) {
            unsigned short objectCount;
            unsigned short remainder;
            divideRoundingUp(static_cast<unsigned short>(MediumPage::lineSize - startOffset), size, objectCount, remainder);
            BASSERT(objectCount);
            m_mediumLineMetadata[sizeClass(size)][lineNumber] = { startOffset, objectCount };
            startOffset = remainder ? size - remainder : 0;
        }

        // The last line in the page rounds down instead of up because it's not allowed to overlap into its neighbor.
        unsigned short objectCount = static_cast<unsigned short>((MediumPage::lineSize - startOffset) / size);
        m_mediumLineMetadata[sizeClass(size)][MediumPage::lineCount - 1] = { startOffset, objectCount };
    }
}

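// Entry point for the background scavenger thread: takes the heap lock and
// runs one scavenging pass.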
void Heap::concurrentScavenge()
{
    std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
    scavenge(lock, scavengeSleepDuration);
}

void Heap::scavenge(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    scavengeSmallPages(lock, sleepDuration);
    scavengeMediumPages(lock, sleepDuration);
    scavengeLargeRanges(lock, sleepDuration);

    sleep(lock, sleepDuration);
}

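// Returns cached free small pages to the VM heap. If the application has
// allocated a page since the last pass, back off and sleep instead of
// releasing memory: aggressively returning pages that are about to be needed
// again is what causes madvise churn on a tight free(malloc(x)) loop.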
void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (1) {
        if (m_isAllocatingPages) {
            m_isAllocatingPages = false;

            sleep(lock, sleepDuration);
            continue;
        }

        if (!m_smallPages.size())
            return;
        m_vmHeap.deallocateSmallPage(lock, m_smallPages.pop());
    }
}

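// Same back-off policy as scavengeSmallPages, applied to the medium page cache.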
void Heap::scavengeMediumPages(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (1) {
        if (m_isAllocatingPages) {
            m_isAllocatingPages = false;

            sleep(lock, sleepDuration);
            continue;
        }

        if (!m_mediumPages.size())
            return;
        m_vmHeap.deallocateMediumPage(lock, m_mediumPages.pop());
    }
}

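// Same back-off policy again, applied to free large objects.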
void Heap::scavengeLargeRanges(std::unique_lock<StaticMutex>& lock, std::chrono::milliseconds sleepDuration)
{
    while (1) {
        if (m_isAllocatingPages) {
            m_isAllocatingPages = false;

            sleep(lock, sleepDuration);
            continue;
        }

        LargeObject largeObject = m_largeObjects.takeGreedy();
        if (!largeObject)
            return;
        m_vmHeap.deallocateLargeObject(lock, largeObject);
    }
}

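// Refills an allocator's bump range cache from a small page: each maximal run
// of unreferenced lines becomes one bump range, and every line and the page
// are ref'd for the objects handed out.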
void Heap::refillSmallBumpRangeCache(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpRangeCache& rangeCache)
{
    BASSERT(!rangeCache.size());
    SmallPage* page = allocateSmallPage(lock, sizeClass);
    SmallLine* lines = page->begin();

    // Due to overlap from the previous line, the last line in the page may not be able to fit any objects.
    size_t end = SmallPage::lineCount;
    if (!m_smallLineMetadata[sizeClass][SmallPage::lineCount - 1].objectCount)
        --end;

    // Find a free line.
    for (size_t lineNumber = 0; lineNumber < end; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock);

        // Merge with subsequent free lines.
        while (++lineNumber < end) {
            if (lines[lineNumber].refCount(lock))
                break;

            LineMetadata& lineMetadata = m_smallLineMetadata[sizeClass][lineNumber];
            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        rangeCache.push({ begin, objectCount });
    }
}

void Heap::refillMediumBumpRangeCache(std::lock_guard<StaticMutex>& lock, size_t sizeClass, BumpRangeCache& rangeCache)
{
    MediumPage* page = allocateMediumPage(lock, sizeClass);
    BASSERT(!rangeCache.size());
    MediumLine* lines = page->begin();

    // Due to overlap from the previous line, the last line in the page may not be able to fit any objects.
    size_t end = MediumPage::lineCount;
    if (!m_mediumLineMetadata[sizeClass][MediumPage::lineCount - 1].objectCount)
        --end;

    // Find a free line.
    for (size_t lineNumber = 0; lineNumber < end; ++lineNumber) {
        if (lines[lineNumber].refCount(lock))
            continue;

        LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
        char* begin = lines[lineNumber].begin() + lineMetadata.startOffset;
        unsigned short objectCount = lineMetadata.objectCount;
        lines[lineNumber].ref(lock, lineMetadata.objectCount);
        page->ref(lock);

        // Merge with subsequent free lines.
        while (++lineNumber < end) {
            if (lines[lineNumber].refCount(lock))
                break;

            LineMetadata& lineMetadata = m_mediumLineMetadata[sizeClass][lineNumber];
            objectCount += lineMetadata.objectCount;
            lines[lineNumber].ref(lock, lineMetadata.objectCount);
            page->ref(lock);
        }

        rangeCache.push({ begin, objectCount });
    }
}

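// Prefers a page of this size class that still has free lines; otherwise takes
// a cached free page (or a new one from the VM heap) and retags it with the
// requested size class, noting that page allocation is in progress so the
// scavenger backs off.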
SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    Vector<SmallPage*>& smallPagesWithFreeLines = m_smallPagesWithFreeLines[sizeClass];
    while (smallPagesWithFreeLines.size()) {
        SmallPage* page = smallPagesWithFreeLines.pop();
        if (!page->refCount(lock) || page->sizeClass() != sizeClass) // Page was promoted to the pages list.
            continue;
        return page;
    }

    m_isAllocatingPages = true;

    SmallPage* page = [this, sizeClass]() {
        if (m_smallPages.size())
            return m_smallPages.pop();
        return m_vmHeap.allocateSmallPage();
    }();

    page->setSizeClass(sizeClass);
    return page;
}

MediumPage* Heap::allocateMediumPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    Vector<MediumPage*>& mediumPagesWithFreeLines = m_mediumPagesWithFreeLines[sizeClass];
    while (mediumPagesWithFreeLines.size()) {
        MediumPage* page = mediumPagesWithFreeLines.pop();
        if (!page->refCount(lock) || page->sizeClass() != sizeClass) // Page was promoted to the pages list.
            continue;
        return page;
    }

    m_isAllocatingPages = true;

    MediumPage* page = [this, sizeClass]() {
        if (m_mediumPages.size())
            return m_mediumPages.pop();
        return m_vmHeap.allocateMediumPage();
    }();

    page->setSizeClass(sizeClass);
    return page;
}

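// Called when the last object in a small line is freed. Freeing the first line
// of a page makes the page available for reuse by its size class; freeing the
// last line moves the whole page to the free page cache and schedules the
// scavenger.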
void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, SmallLine* line)
{
    BASSERT(!line->refCount(lock));
    SmallPage* page = SmallPage::get(line);
    size_t refCount = page->refCount(lock);
    page->deref(lock);

    switch (refCount) {
    case SmallPage::lineCount: {
        // First free line in the page.
        m_smallPagesWithFreeLines[page->sizeClass()].push(page);
        break;
    }
    case 1: {
        // Last free line in the page.
        m_smallPages.push(page);
        m_scavenger.run();
        break;
    }
    }
}

void Heap::deallocateMediumLine(std::lock_guard<StaticMutex>& lock, MediumLine* line)
{
    BASSERT(!line->refCount(lock));
    MediumPage* page = MediumPage::get(line);
    size_t refCount = page->refCount(lock);
    page->deref(lock);

    switch (refCount) {
    case MediumPage::lineCount: {
        // First free line in the page.
        m_mediumPagesWithFreeLines[page->sizeClass()].push(page);
        break;
    }
    case 1: {
        // Last free line in the page.
        m_mediumPages.push(page);
        m_scavenger.run();
        break;
    }
    }
}

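// XLarge allocations bypass the free lists and map memory directly from the VM.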
void* Heap::allocateXLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size)
{
    BASSERT(isPowerOfTwo(alignment));
    BASSERT(alignment >= xLargeAlignment);
    BASSERT(size == roundUpToMultipleOf<xLargeAlignment>(size));

    m_isAllocatingPages = true;

    void* result = vmAllocate(alignment, size);
    m_xLargeObjects.push(Range(result, size));
    return result;
}

void* Heap::allocateXLarge(std::lock_guard<StaticMutex>& lock, size_t size)
{
    return allocateXLarge(lock, superChunkSize, size);
}

Range Heap::findXLarge(std::lock_guard<StaticMutex>&, void* object)
{
    for (auto& range : m_xLargeObjects) {
        if (range.begin() != object)
            continue;
        return range;
    }

    return Range();
}

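// Unmaps an XLarge object. The heap lock is dropped around vmDeallocate so
// other threads aren't held up while the mapping is released.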
void Heap::deallocateXLarge(std::unique_lock<StaticMutex>& lock, void* object)
{
    for (auto& range : m_xLargeObjects) {
        if (range.begin() != object)
            continue;

        Range toDeallocate = m_xLargeObjects.pop(&range);

        lock.unlock();
        vmDeallocate(toDeallocate.begin(), toDeallocate.size());
        lock.lock();

        break;
    }
}

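// Carves the requested size out of a free large object, splitting the excess
// back into the free list when it is big enough to be allocated on its own,
// and marks the result allocated.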
void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, LargeObject& largeObject, size_t size)
{
    BASSERT(largeObject.isFree());

    if (largeObject.size() - size > largeMin) {
        std::pair<LargeObject, LargeObject> split = largeObject.split(size);
        largeObject = split.first;
        m_largeObjects.insert(split.second);
    }

    largeObject.setFree(false);
    return largeObject.begin();
}

void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t size)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));

    m_isAllocatingPages = true;

    LargeObject largeObject = m_largeObjects.take(size);
    if (!largeObject)
        largeObject = m_vmHeap.allocateLargeObject(size);

    return allocateLarge(lock, largeObject, size);
}

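// Aligned variant: finds (or maps) a free object large enough to contain an
// aligned allocation, splits off and frees any misaligned prefix, then carves
// out the requested size.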
void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, size_t unalignedSize)
{
    BASSERT(size <= largeMax);
    BASSERT(size >= largeMin);
    BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
    BASSERT(unalignedSize <= largeMax);
    BASSERT(unalignedSize >= largeMin);
    BASSERT(unalignedSize == roundUpToMultipleOf<largeAlignment>(unalignedSize));
    BASSERT(alignment <= largeChunkSize / 2);
    BASSERT(alignment >= largeAlignment);
    BASSERT(isPowerOfTwo(alignment));

    m_isAllocatingPages = true;

    LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
    if (!largeObject)
        largeObject = m_vmHeap.allocateLargeObject(alignment, size, unalignedSize);

    size_t alignmentMask = alignment - 1;
    if (test(largeObject.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, largeObject.begin() + largeMin) - largeObject.begin();
        std::pair<LargeObject, LargeObject> pair = largeObject.split(prefixSize);
        m_largeObjects.insert(pair.first);
        largeObject = pair.second;
    }

    return allocateLarge(lock, largeObject, size);
}

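// Marks the object free, merges it with any adjacent free objects, reinserts
// the result into the free list, and schedules the scavenger.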
void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, const LargeObject& largeObject)
{
    BASSERT(!largeObject.isFree());
    largeObject.setFree(true);

    LargeObject merged = largeObject.merge();
    m_largeObjects.insert(merged);
    m_scavenger.run();
}

void Heap::deallocateLarge(std::lock_guard<StaticMutex>& lock, void* object)
{
    LargeObject largeObject(object);
    deallocateLarge(lock, largeObject);
}

} // namespace bmalloc