Source/bmalloc/bmalloc/Heap.cpp
/*
 * Copyright (C) 2014-2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Heap.h"

#if BPLATFORM(IOS)
#include "AvailableMemory.h"
#endif
#include "BumpAllocator.h"
#include "Chunk.h"
#include "DebugHeap.h"
#include "PerProcess.h"
#include "SmallLine.h"
#include "SmallPage.h"
#include <thread>

#if BOS(DARWIN)
#include "bmalloc.h"
#if BPLATFORM(IOS)
#import <mach/host_info.h>
#import <mach/mach.h>
#import <mach/mach_error.h>
#endif
#endif

namespace bmalloc {

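// The constructor caches physical-page-size-dependent metadata and, on Darwin,
// installs a dispatch source that scavenges the heap when the system reports
// critical memory pressure.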
Heap::Heap(std::lock_guard<StaticMutex>&)
    : m_vmPageSizePhysical(vmPageSizePhysical())
    , m_scavenger(*this, &Heap::concurrentScavenge)
    , m_debugHeap(nullptr)
#if BPLATFORM(IOS)
    , m_maxAvailableMemory(availableMemory())
#endif
{
    RELEASE_BASSERT(vmPageSizePhysical() >= smallPageSize);
    RELEASE_BASSERT(vmPageSize() >= vmPageSizePhysical());

    initializeLineMetadata();
    initializePageMetadata();

    if (m_environment.isDebugHeapEnabled())
        m_debugHeap = PerProcess<DebugHeap>::get();

#if BOS(DARWIN)
    auto queue = dispatch_queue_create("WebKit Malloc Memory Pressure Handler", DISPATCH_QUEUE_SERIAL);
    m_pressureHandlerDispatchSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0, DISPATCH_MEMORYPRESSURE_CRITICAL, queue);
    dispatch_source_set_event_handler(m_pressureHandlerDispatchSource, ^{
        std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());
        scavenge(lock);
    });
    dispatch_resume(m_pressureHandlerDispatchSource);
    dispatch_release(queue);
#endif
}

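// Precomputes, for each small size class, how many objects begin in each line
// of a page and at what offset, so bump allocation can walk a page without
// doing per-object arithmetic.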
void Heap::initializeLineMetadata()
{
    size_t sizeClassCount = bmalloc::sizeClass(smallLineSize);
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    m_smallLineMetadata.grow(sizeClassCount * smallLineCount);

    for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
        size_t size = objectSize(sizeClass);
        LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

        size_t object = 0;
        size_t line = 0;
        while (object < m_vmPageSizePhysical) {
            line = object / smallLineSize;
            size_t leftover = object % smallLineSize;

            size_t objectCount;
            size_t remainder;
            divideRoundingUp(smallLineSize - leftover, size, objectCount, remainder);

            pageMetadata[line] = { static_cast<unsigned char>(leftover), static_cast<unsigned char>(objectCount) };

            object += objectCount * size;
        }

        // Don't allow the last object in a page to escape the page.
        if (object > m_vmPageSizePhysical) {
            BASSERT(pageMetadata[line].objectCount);
            --pageMetadata[line].objectCount;
        }
    }
}

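// Picks a page size for each size class: the smallest multiple of the physical
// page size whose rounding waste is at most 1/pageSizeWasteFactor of the page,
// and records the result as a page class index.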
void Heap::initializePageMetadata()
{
    auto computePageSize = [&](size_t sizeClass) {
        size_t size = objectSize(sizeClass);
        if (sizeClass < bmalloc::sizeClass(smallLineSize))
            return m_vmPageSizePhysical;

        for (size_t pageSize = m_vmPageSizePhysical;
            pageSize < pageSizeMax;
            pageSize += m_vmPageSizePhysical) {
            RELEASE_BASSERT(pageSize <= chunkSize / 2);
            size_t waste = pageSize % size;
            if (waste <= pageSize / pageSizeWasteFactor)
                return pageSize;
        }

        return pageSizeMax;
    };

    for (size_t i = 0; i < sizeClassCount; ++i)
        m_pageClasses[i] = (computePageSize(i) - 1) / smallPageSize;
}

#if BPLATFORM(IOS)
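// Refreshes m_memoryFootprint from the task's phys_footprint and recomputes the
// fraction of available memory currently in use, clamped to 1.0.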
void Heap::updateMemoryInUseParameters()
{
    task_vm_info_data_t vmInfo;
    mach_msg_type_number_t vmSize = TASK_VM_INFO_COUNT;

    if (KERN_SUCCESS != task_info(mach_task_self(), TASK_VM_INFO, (task_info_t)(&vmInfo), &vmSize))
        m_memoryFootprint = 0;
    else
        m_memoryFootprint = static_cast<size_t>(vmInfo.phys_footprint);

    double percentInUse = static_cast<double>(m_memoryFootprint) / static_cast<double>(m_maxAvailableMemory);
    m_percentAvailableMemoryInUse = std::min(percentInUse, 1.0);
}
#endif

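// Scavenger thread entry point: if the heap has grown since the last pass and
// the process is not under memory pressure, defer the work to a later run.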
void Heap::concurrentScavenge()
{
    std::unique_lock<StaticMutex> lock(PerProcess<Heap>::mutex());

#if BOS(DARWIN)
    pthread_set_qos_class_self_np(m_requestedScavengerThreadQOSClass, 0);
#endif

    if (m_isGrowing && !isUnderMemoryPressure()) {
        m_isGrowing = false;
        m_scavenger.runSoon();
        return;
    }

    scavenge(lock);
}

void Heap::scavenge(std::unique_lock<StaticMutex>& lock)
{
    scavengeSmallPages(lock);
    scavengeLargeObjects(lock);
}

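// Returns every cached free small page to the VM heap.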
void Heap::scavengeSmallPages(std::unique_lock<StaticMutex>& lock)
{
    for (size_t pageClass = 0; pageClass < pageClassCount; pageClass++) {
        auto& smallPages = m_smallPages[pageClass];

        while (!smallPages.isEmpty()) {
            SmallPage* page = smallPages.pop();
            m_vmHeap.deallocateSmallPage(lock, pageClass, page);
        }
    }
}

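// Releases the physical memory behind each free large range. The heap lock is
// dropped around the VM call, so the loop re-clamps its index in case the free
// list shrank while unlocked.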
void Heap::scavengeLargeObjects(std::unique_lock<StaticMutex>& lock)
{
    auto& ranges = m_largeFree.ranges();
    for (size_t i = ranges.size(); i-- > 0; i = std::min(i, ranges.size())) {
        auto range = ranges.pop(i);

        lock.unlock();
        vmDeallocatePhysicalPagesSloppy(range.begin(), range.size());
        lock.lock();

        range.setPhysicalSize(0);
        ranges.push(range);
    }
}

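// Counts allocated bytes and, once scavengerBytesPerMemoryPressureCheck bytes
// have accumulated, runs the scavenger immediately if the process is under
// memory pressure.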
void Heap::scheduleScavengerIfUnderMemoryPressure(size_t bytes)
{
    m_scavengerBytes += bytes;
    if (m_scavengerBytes < scavengerBytesPerMemoryPressureCheck)
        return;

    m_scavengerBytes = 0;

    if (m_scavenger.willRun())
        return;

    if (!isUnderMemoryPressure())
        return;

    m_isGrowing = false;
    m_scavenger.run();
}

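// Schedules a deferred scavenge; the memory pressure check above may trigger an
// immediate one instead.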
void Heap::scheduleScavenger(size_t bytes)
{
    scheduleScavengerIfUnderMemoryPressure(bytes);

    if (m_scavenger.willRunSoon())
        return;

    m_isGrowing = false;
    m_scavenger.runSoon();
}

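// Prefers a page that already has free lines for this size class; otherwise
// reuses a cached free page of the right page class, or grabs a fresh page
// from the VM heap and marks its chunk as holding small objects.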
SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
    if (!m_smallPagesWithFreeLines[sizeClass].isEmpty())
        return m_smallPagesWithFreeLines[sizeClass].popFront();

    m_isGrowing = true;

    SmallPage* page = [&]() {
        size_t pageClass = m_pageClasses[sizeClass];
        if (!m_smallPages[pageClass].isEmpty())
            return m_smallPages[pageClass].pop();

        scheduleScavengerIfUnderMemoryPressure(pageSize(pageClass));

        SmallPage* page = m_vmHeap.allocateSmallPage(lock, pageClass);
        m_objectTypes.set(Chunk::get(page), ObjectType::Small);
        return page;
    }();

    page->setSizeClass(sizeClass);
    return page;
}

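// Releases one small line. The first free line puts the page back on the
// per-size-class free-line list; when the page's last line is released, the
// whole page moves to the free-page list and a scavenge is scheduled.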
void Heap::deallocateSmallLine(std::lock_guard<StaticMutex>& lock, Object object)
{
    BASSERT(!object.line()->refCount(lock));
    SmallPage* page = object.page();
    page->deref(lock);

    if (!page->hasFreeLines(lock)) {
        page->setHasFreeLines(lock, true);
        m_smallPagesWithFreeLines[page->sizeClass()].push(page);
    }

    if (page->refCount(lock))
        return;

    size_t sizeClass = page->sizeClass();
    size_t pageClass = m_pageClasses[sizeClass];

    m_smallPagesWithFreeLines[sizeClass].remove(page);
    m_smallPages[pageClass].push(page);

    scheduleScavenger(pageSize(pageClass));
}

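// Refills a bump allocator by scanning the page's precomputed line metadata
// for runs of unreferenced lines, handing out each run as a single bump range.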
void Heap::allocateSmallBumpRangesByMetadata(
    std::lock_guard<StaticMutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache)
{
    SmallPage* page = allocateSmallPage(lock, sizeClass);
    SmallLine* lines = page->begin();
    BASSERT(page->hasFreeLines(lock));
    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
    LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];

    auto findSmallBumpRange = [&](size_t& lineNumber) {
        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (!lines[lineNumber].refCount(lock)) {
                if (pageMetadata[lineNumber].objectCount)
                    return true;
            }
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](size_t& lineNumber) -> BumpRange {
        char* begin = lines[lineNumber].begin() + pageMetadata[lineNumber].startOffset;
        unsigned short objectCount = 0;

        for ( ; lineNumber < smallLineCount; ++lineNumber) {
            if (lines[lineNumber].refCount(lock))
                break;

            if (!pageMetadata[lineNumber].objectCount)
                continue;

            objectCount += pageMetadata[lineNumber].objectCount;
            lines[lineNumber].ref(lock, pageMetadata[lineNumber].objectCount);
            page->ref(lock);
        }
        return { begin, objectCount };
    };

    size_t lineNumber = 0;
    for (;;) {
        if (!findSmallBumpRange(lineNumber)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            m_smallPagesWithFreeLines[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(lineNumber);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

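// Variant for size classes at least as large as a small line: scans the page
// object by object instead of consulting per-line metadata.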
void Heap::allocateSmallBumpRangesByObject(
    std::lock_guard<StaticMutex>& lock, size_t sizeClass,
    BumpAllocator& allocator, BumpRangeCache& rangeCache)
{
    size_t size = allocator.size();
    SmallPage* page = allocateSmallPage(lock, sizeClass);
    BASSERT(page->hasFreeLines(lock));

    auto findSmallBumpRange = [&](Object& it, Object& end) {
        for ( ; it + size <= end; it = it + size) {
            if (!it.line()->refCount(lock))
                return true;
        }
        return false;
    };

    auto allocateSmallBumpRange = [&](Object& it, Object& end) -> BumpRange {
        char* begin = it.address();
        unsigned short objectCount = 0;
        for ( ; it + size <= end; it = it + size) {
            if (it.line()->refCount(lock))
                break;

            ++objectCount;
            it.line()->ref(lock);
            it.page()->ref(lock);
        }
        return { begin, objectCount };
    };

    Object it(page->begin()->begin());
    Object end(it + pageSize(m_pageClasses[sizeClass]));
    for (;;) {
        if (!findSmallBumpRange(it, end)) {
            page->setHasFreeLines(lock, false);
            BASSERT(allocator.canAllocate());
            return;
        }

        // In a fragmented page, some free ranges might not fit in the cache.
        if (rangeCache.size() == rangeCache.capacity()) {
            m_smallPagesWithFreeLines[sizeClass].push(page);
            BASSERT(allocator.canAllocate());
            return;
        }

        BumpRange bumpRange = allocateSmallBumpRange(it, end);
        if (allocator.canAllocate())
            rangeCache.push(bumpRange);
        else
            allocator.refill(bumpRange);
    }
}

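// Carves an aligned, properly sized piece out of a free range: leading and
// trailing leftovers go back on the free list, physical pages are committed
// for the allocated portion, and the chunk is tagged as holding a large object.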
LargeRange Heap::splitAndAllocate(LargeRange& range, size_t alignment, size_t size)
{
    LargeRange prev;
    LargeRange next;

    size_t alignmentMask = alignment - 1;
    if (test(range.begin(), alignmentMask)) {
        size_t prefixSize = roundUpToMultipleOf(alignment, range.begin()) - range.begin();
        std::pair<LargeRange, LargeRange> pair = range.split(prefixSize);
        prev = pair.first;
        range = pair.second;
    }

    if (range.size() - size > size / pageSizeWasteFactor) {
        std::pair<LargeRange, LargeRange> pair = range.split(size);
        range = pair.first;
        next = pair.second;
    }

    if (range.physicalSize() < range.size()) {
        scheduleScavengerIfUnderMemoryPressure(range.size());

        vmAllocatePhysicalPagesSloppy(range.begin() + range.physicalSize(), range.size() - range.physicalSize());
        range.setPhysicalSize(range.size());
    }

    if (prev)
        m_largeFree.add(prev);

    if (next)
        m_largeFree.add(next);

    m_objectTypes.set(Chunk::get(range.begin()), ObjectType::Large);

    m_largeAllocated.set(range.begin(), range.size());
    return range;
}

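// Rounds size and alignment up to largeAlignment (returning null on overflow),
// then satisfies the request from the large free list, refilling it from the
// VM heap when necessary.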
void* Heap::tryAllocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size)
{
    BASSERT(isPowerOfTwo(alignment));

    m_isGrowing = true;

    size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
    if (roundedSize < size) // Check for overflow
        return nullptr;
    size = roundedSize;

    size_t roundedAlignment = roundUpToMultipleOf<largeAlignment>(alignment);
    if (roundedAlignment < alignment) // Check for overflow
        return nullptr;
    alignment = roundedAlignment;

    LargeRange range = m_largeFree.remove(alignment, size);
    if (!range) {
        range = m_vmHeap.tryAllocateLargeChunk(lock, alignment, size);
        if (!range)
            return nullptr;

        m_largeFree.add(range);
        range = m_largeFree.remove(alignment, size);
    }

    return splitAndAllocate(range, alignment, size).begin();
}

void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size)
{
    void* result = tryAllocateLarge(lock, alignment, size);
    RELEASE_BASSERT(result);
    return result;
}

bool Heap::isLarge(std::lock_guard<StaticMutex>&, void* object)
{
    return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large;
}

size_t Heap::largeSize(std::lock_guard<StaticMutex>&, void* object)
{
    return m_largeAllocated.get(object);
}

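// Shrinks a large allocation in place by re-splitting its range at newSize and
// returning the tail to the free list.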
void Heap::shrinkLarge(std::lock_guard<StaticMutex>&, const Range& object, size_t newSize)
{
    BASSERT(object.size() > newSize);

    size_t size = m_largeAllocated.remove(object.begin());
    LargeRange range = LargeRange(object, size);
    splitAndAllocate(range, alignment, newSize);

    scheduleScavenger(size);
}

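// Returns the freed range to the large free list (still physically committed)
// and schedules a scavenge to release its pages later.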
void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object)
{
    size_t size = m_largeAllocated.remove(object);
    m_largeFree.add(LargeRange(object, size, size));

    scheduleScavenger(size);
}

} // namespace bmalloc