Unreviewed, follow-up patch after r230474
Source/bmalloc/bmalloc/Scavenger.cpp
/*
 * Copyright (C) 2017-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "Scavenger.h"

#include "AllIsoHeapsInlines.h"
#include "AvailableMemory.h"
#include "BulkDecommit.h"
#include "Environment.h"
#include "Heap.h"
#if BOS(DARWIN)
#import <dispatch/dispatch.h>
#import <mach/host_info.h>
#import <mach/mach.h>
#import <mach/mach_error.h>
#endif
#include <thread>

namespace bmalloc {

static constexpr bool verbose = false;

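// RAII timer: when verbose logging is enabled, reports how long the enclosing
// scope took, either via an explicit print() call or when the object is destroyed.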
struct PrintTime {
    PrintTime(const char* str)
        : string(str)
    { }

    ~PrintTime()
    {
        if (!printed)
            print();
    }
    void print()
    {
        if (verbose) {
            fprintf(stderr, "%s %lfms\n", string, static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - start).count()) / 1000);
            printed = true;
        }
    }
    const char* string;
    std::chrono::steady_clock::time_point start { std::chrono::steady_clock::now() };
    bool printed { false };
};

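// On Darwin, a critical memory-pressure notification triggers a full scavenge
// via a dispatch source; the scavenger thread itself is started last.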
Scavenger::Scavenger(std::lock_guard<Mutex>&)
{
#if BOS(DARWIN)
    auto queue = dispatch_queue_create("WebKit Malloc Memory Pressure Handler", DISPATCH_QUEUE_SERIAL);
    m_pressureHandlerDispatchSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0, DISPATCH_MEMORYPRESSURE_CRITICAL, queue);
    dispatch_source_set_event_handler(m_pressureHandlerDispatchSource, ^{
        scavenge();
    });
    dispatch_resume(m_pressureHandlerDispatchSource);
    dispatch_release(queue);
#endif

    m_thread = std::thread(&threadEntryPoint, this);
}

void Scavenger::run()
{
    std::lock_guard<Mutex> lock(m_mutex);
    runHoldingLock();
}

void Scavenger::runHoldingLock()
{
    m_state = State::Run;
    m_condition.notify_all();
}

void Scavenger::runSoon()
{
    std::lock_guard<Mutex> lock(m_mutex);
    runSoonHoldingLock();
}

void Scavenger::runSoonHoldingLock()
{
    if (willRunSoon())
        return;
    m_state = State::RunSoon;
    m_condition.notify_all();
}

void Scavenger::didStartGrowing()
{
    // We don't really need to lock here, since this is just a heuristic.
    m_isProbablyGrowing = true;
}

void Scavenger::scheduleIfUnderMemoryPressure(size_t bytes)
{
    std::lock_guard<Mutex> lock(m_mutex);
    scheduleIfUnderMemoryPressureHoldingLock(bytes);
}

void Scavenger::scheduleIfUnderMemoryPressureHoldingLock(size_t bytes)
{
    m_scavengerBytes += bytes;
    if (m_scavengerBytes < scavengerBytesPerMemoryPressureCheck)
        return;

    m_scavengerBytes = 0;

    if (willRun())
        return;

    if (!isUnderMemoryPressure())
        return;

    m_isProbablyGrowing = false;
    runHoldingLock();
}

void Scavenger::schedule(size_t bytes)
{
    std::lock_guard<Mutex> lock(m_mutex);
    scheduleIfUnderMemoryPressureHoldingLock(bytes);

    if (willRunSoon())
        return;

    m_isProbablyGrowing = false;
    runSoonHoldingLock();
}

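// Debug helper: prints the process's VM statistics (Darwin only) alongside
// bmalloc's own freeable memory and footprint.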
inline void dumpStats()
{
    auto dump = [] (auto* string, auto size) {
        fprintf(stderr, "%s %zuMB\n", string, static_cast<size_t>(size) / 1024 / 1024);
    };

#if BOS(DARWIN)
    task_vm_info_data_t vmInfo;
    mach_msg_type_number_t vmSize = TASK_VM_INFO_COUNT;
    if (KERN_SUCCESS == task_info(mach_task_self(), TASK_VM_INFO, (task_info_t)(&vmInfo), &vmSize)) {
        dump("phys_footprint", vmInfo.phys_footprint);
        dump("internal+compressed", vmInfo.internal + vmInfo.compressed);
    }
#endif

    dump("bmalloc-freeable", PerProcess<Scavenger>::get()->freeableMemory());
    dump("bmalloc-footprint", PerProcess<Scavenger>::get()->footprint());
}

std::chrono::milliseconds Scavenger::timeSinceLastFullScavenge()
{
    std::unique_lock<Mutex> lock(m_mutex);
    return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - m_lastFullScavengeTime);
}

std::chrono::milliseconds Scavenger::timeSinceLastPartialScavenge()
{
    std::unique_lock<Mutex> lock(m_mutex);
    return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - m_lastPartialScavengeTime);
}

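// Full scavenge: decommit all freeable memory in every active Heap and IsoHeap,
// and re-mark large ranges as eligible.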
void Scavenger::scavenge()
{
    std::unique_lock<Mutex> lock(m_scavengingMutex);

    if (verbose) {
        fprintf(stderr, "--------------------------------\n");
        fprintf(stderr, "--before scavenging--\n");
        dumpStats();
    }

    {
        BulkDecommit decommitter;

        {
            PrintTime printTime("\nfull scavenge under lock time");
            std::lock_guard<Mutex> lock(Heap::mutex());
            for (unsigned i = numHeaps; i--;) {
                if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                    continue;
                PerProcess<PerHeapKind<Heap>>::get()->at(i).scavenge(lock, decommitter);
            }
            decommitter.processEager();
        }

        {
            PrintTime printTime("full scavenge lazy decommit time");
            decommitter.processLazy();
        }

        {
            PrintTime printTime("full scavenge mark all as eligible time");
            std::lock_guard<Mutex> lock(Heap::mutex());
            for (unsigned i = numHeaps; i--;) {
                if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                    continue;
                PerProcess<PerHeapKind<Heap>>::get()->at(i).markAllLargeAsEligibile(lock);
            }
        }
    }

    {
        RELEASE_BASSERT(!m_deferredDecommits.size());
        PerProcess<AllIsoHeaps>::get()->forEach(
            [&] (IsoHeapImplBase& heap) {
                heap.scavenge(m_deferredDecommits);
            });
        IsoHeapImplBase::finishScavenging(m_deferredDecommits);
        m_deferredDecommits.shrink(0);
    }

    if (verbose) {
        fprintf(stderr, "--after scavenging--\n");
        dumpStats();
        fprintf(stderr, "--------------------------------\n");
    }

    {
        std::unique_lock<Mutex> lock(m_mutex);
        m_lastFullScavengeTime = std::chrono::steady_clock::now();
    }
}

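// Partial scavenge: decommit only down to each heap's high watermark, skipping
// Heaps with less than 4MB of freeable memory.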
void Scavenger::partialScavenge()
{
    std::unique_lock<Mutex> lock(m_scavengingMutex);

    if (verbose) {
        fprintf(stderr, "--------------------------------\n");
        fprintf(stderr, "--before partial scavenging--\n");
        dumpStats();
    }

    {
        BulkDecommit decommitter;
        {
            PrintTime printTime("\npartialScavenge under lock time");
            std::lock_guard<Mutex> lock(Heap::mutex());
            for (unsigned i = numHeaps; i--;) {
                if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                    continue;
                Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(i);
                size_t freeableMemory = heap.freeableMemory(lock);
                if (freeableMemory < 4 * MB)
                    continue;
                heap.scavengeToHighWatermark(lock, decommitter);
            }

            decommitter.processEager();
        }

        {
            PrintTime printTime("partialScavenge lazy decommit time");
            decommitter.processLazy();
        }

        {
            PrintTime printTime("partialScavenge mark all as eligible time");
            std::lock_guard<Mutex> lock(Heap::mutex());
            for (unsigned i = numHeaps; i--;) {
                if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                    continue;
                Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(i);
                heap.markAllLargeAsEligibile(lock);
            }
        }
    }

    {
        RELEASE_BASSERT(!m_deferredDecommits.size());
        PerProcess<AllIsoHeaps>::get()->forEach(
            [&] (IsoHeapImplBase& heap) {
                heap.scavengeToHighWatermark(m_deferredDecommits);
            });
        IsoHeapImplBase::finishScavenging(m_deferredDecommits);
        m_deferredDecommits.shrink(0);
    }

    if (verbose) {
        fprintf(stderr, "--after partial scavenging--\n");
        dumpStats();
        fprintf(stderr, "--------------------------------\n");
    }

    {
        std::unique_lock<Mutex> lock(m_mutex);
        m_lastPartialScavengeTime = std::chrono::steady_clock::now();
    }
}

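// Returns how much memory, across all active Heaps and IsoHeaps, could be
// decommitted by a scavenge right now.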
size_t Scavenger::freeableMemory()
{
    size_t result = 0;
    {
        std::lock_guard<Mutex> lock(Heap::mutex());
        for (unsigned i = numHeaps; i--;) {
            if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                continue;
            result += PerProcess<PerHeapKind<Heap>>::get()->at(i).freeableMemory(lock);
        }
    }

    PerProcess<AllIsoHeaps>::get()->forEach(
        [&] (IsoHeapImplBase& heap) {
            result += heap.freeableMemory();
        });

    return result;
}

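// Returns the total memory currently committed by bmalloc across all active
// Heaps and IsoHeaps.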
size_t Scavenger::footprint()
{
    RELEASE_BASSERT(!PerProcess<Environment>::get()->isDebugHeapEnabled());

    size_t result = 0;
    for (unsigned i = numHeaps; i--;) {
        if (!isActiveHeapKind(static_cast<HeapKind>(i)))
            continue;
        result += PerProcess<PerHeapKind<Heap>>::get()->at(i).footprint();
    }

    PerProcess<AllIsoHeaps>::get()->forEach(
        [&] (IsoHeapImplBase& heap) {
            result += heap.footprint();
        });

    return result;
}

void Scavenger::threadEntryPoint(Scavenger* scavenger)
{
    scavenger->threadRunLoop();
}

void Scavenger::threadRunLoop()
{
    setSelfQOSClass();
#if BOS(DARWIN)
    setThreadName("JavaScriptCore bmalloc scavenger");
#else
    setThreadName("BMScavenger");
#endif

    // This loop ratchets downward from most active to least active state. While
    // we ratchet downward, any other thread may reset our state.

    // We require any state change while we are sleeping to signal to our
    // condition variable and wake us up.

    auto truth = [] { return true; };

    while (truth()) {
        if (m_state == State::Sleep) {
            std::unique_lock<Mutex> lock(m_mutex);
            m_condition.wait(lock, [&]() { return m_state != State::Sleep; });
        }

        if (m_state == State::RunSoon) {
            std::unique_lock<Mutex> lock(m_mutex);
            m_condition.wait_for(lock, asyncTaskSleepDuration, [&]() { return m_state != State::RunSoon; });
        }

        m_state = State::Sleep;

        setSelfQOSClass();

        if (verbose) {
            fprintf(stderr, "--------------------------------\n");
            fprintf(stderr, "considering running scavenger\n");
            dumpStats();
            fprintf(stderr, "--------------------------------\n");
        }

        enum class ScavengeMode {
            None,
            Partial,
            Full
        };

        size_t freeableMemory = this->freeableMemory();

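        // Decide what this cycle should do: scavenge fully right away when under
        // memory pressure, otherwise prefer rate-limited partial scavenges, and do
        // nothing when too little memory is freeable to be worth the work.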
        ScavengeMode scavengeMode = [&] {
            auto timeSinceLastFullScavenge = this->timeSinceLastFullScavenge();
            auto timeSinceLastPartialScavenge = this->timeSinceLastPartialScavenge();
            auto timeSinceLastScavenge = std::min(timeSinceLastPartialScavenge, timeSinceLastFullScavenge);
            if (isUnderMemoryPressure() && freeableMemory > 4 * MB && timeSinceLastScavenge > std::chrono::milliseconds(5))
                return ScavengeMode::Full;

            if (!m_isProbablyGrowing) {
                if (timeSinceLastFullScavenge < std::chrono::milliseconds(1000))
                    return ScavengeMode::Partial;
                return ScavengeMode::Full;
            }

#if BCPU(X86_64)
            auto partialScavengeInterval = std::chrono::milliseconds(12000);
#else
            auto partialScavengeInterval = std::chrono::milliseconds(8000);
#endif
            if (timeSinceLastScavenge < partialScavengeInterval) {
                // Rate limit partial scavenges.
                return ScavengeMode::None;
            }
            if (freeableMemory < 50 * MB)
                return ScavengeMode::None;
            if (5 * freeableMemory < footprint())
                return ScavengeMode::None;
            return ScavengeMode::Partial;
        }();

        m_isProbablyGrowing = false;

        switch (scavengeMode) {
        case ScavengeMode::None: {
            runSoon();
            break;
        }
        case ScavengeMode::Partial: {
            partialScavenge();
            runSoon();
            break;
        }
        case ScavengeMode::Full: {
            scavenge();
            break;
        }
        }
    }
}

void Scavenger::setThreadName(const char* name)
{
    BUNUSED(name);
#if BOS(DARWIN)
    pthread_setname_np(name);
#elif BOS(LINUX)
    // Truncate the given name since Linux limits thread names to 16 bytes, including the null terminator.
    std::array<char, 16> buf;
    strncpy(buf.data(), name, buf.size() - 1);
    buf[buf.size() - 1] = '\0';
    pthread_setname_np(pthread_self(), buf.data());
#endif
}

void Scavenger::setSelfQOSClass()
{
#if BOS(DARWIN)
    pthread_set_qos_class_self_np(requestedScavengerThreadQOSClass(), 0);
#endif
}

} // namespace bmalloc