/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2016 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include "config.h"
#include "MarkedSpace.h"

#include "IncrementalSweeper.h"
#include "JSObject.h"
#include "JSCInlines.h"
#include "SuperSampler.h"
#include <wtf/ListDump.h>

namespace JSC {

std::array<size_t, MarkedSpace::numSizeClasses> MarkedSpace::s_sizeClassForSizeStep;

namespace {

const Vector<size_t>& sizeClasses()
{
    static Vector<size_t>* result;
    static std::once_flag once;
    std::call_once(
        once,
        [] {
            result = new Vector<size_t>();

            auto add = [&] (size_t sizeClass) {
                if (Options::dumpSizeClasses())
                    dataLog("Adding JSC MarkedSpace size class: ", sizeClass, "\n");
                // Perform some validation as we go.
                RELEASE_ASSERT(!(sizeClass % MarkedSpace::sizeStep));
                if (result->isEmpty())
                    RELEASE_ASSERT(sizeClass == MarkedSpace::sizeStep);
                else
                    RELEASE_ASSERT(sizeClass > result->last());
                result->append(sizeClass);
            };

            // This is a definition of the size classes in our GC. It must define all of the
            // size classes from sizeStep up to largeCutoff.

            // Have very precise size classes for the small stuff. This is a loop to make it easy to reduce
            // atomSize.
            for (size_t size = MarkedSpace::sizeStep; size < MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep)
                add(size);

            // We want to make sure that the remaining size classes minimize internal fragmentation (i.e.
            // the wasted space at the tail end of a MarkedBlock) while proceeding in a roughly exponential
            // way, starting just above the precise size classes and going up to about four cells per block.

            if (Options::dumpSizeClasses())
                dataLog("    Marked block payload size: ", static_cast<size_t>(MarkedSpace::blockPayload), "\n");

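            // Illustrative walk-through with made-up numbers (the real constants live in MarkedSpace.h and
            // Options): with preciseCutoff == 80, sizeStep == 16, and sizeClassProgression() == 1.4, the
            // loop below would consider approximate sizes 80, 112, 156.8, 219.5, ... and round each one up
            // to a multiple of sizeStep before deciding whether to keep it.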
            for (unsigned i = 0; ; ++i) {
                double approximateSize = MarkedSpace::preciseCutoff * pow(Options::sizeClassProgression(), i);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as a double: ", approximateSize, "\n");

                size_t approximateSizeInBytes = static_cast<size_t>(approximateSize);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as bytes: ", approximateSizeInBytes, "\n");

                // Make sure that the computer did the math correctly.
                RELEASE_ASSERT(approximateSizeInBytes >= MarkedSpace::preciseCutoff);

                if (approximateSizeInBytes > MarkedSpace::largeCutoff)
                    break;

                size_t sizeClass =
                    WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(approximateSizeInBytes);

                if (Options::dumpSizeClasses())
                    dataLog("    Size class: ", sizeClass, "\n");

                // Optimize the size class so that there isn't any slop at the end of the block's
                // payload.
                unsigned cellsPerBlock = MarkedSpace::blockPayload / sizeClass;
                size_t possiblyBetterSizeClass = (MarkedSpace::blockPayload / cellsPerBlock) & ~(MarkedSpace::sizeStep - 1);

                if (Options::dumpSizeClasses())
                    dataLog("    Possibly better size class: ", possiblyBetterSizeClass, "\n");

                // The size class we just came up with is better than the other one if it reduces
                // total wastage assuming we only allocate cells of that size.
                size_t originalWastage = MarkedSpace::blockPayload - cellsPerBlock * sizeClass;
                size_t newWastage = (possiblyBetterSizeClass - sizeClass) * cellsPerBlock;

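                // Worked example with made-up numbers (assume, purely for illustration, a blockPayload of
                // 16384 bytes and a sizeStep of 16): a sizeClass of 1600 gives cellsPerBlock == 10 and
                // possiblyBetterSizeClass == (16384 / 10) & ~15 == 1632. originalWastage is then
                // 16384 - 10 * 1600 == 384 bytes of slop at the end of the block, while newWastage is
                // (1632 - 1600) * 10 == 320 bytes of per-cell padding, so 1632 wins below.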
                if (Options::dumpSizeClasses())
                    dataLog("    Original wastage: ", originalWastage, ", new wastage: ", newWastage, "\n");

                size_t betterSizeClass;
                if (newWastage > originalWastage)
                    betterSizeClass = sizeClass;
                else
                    betterSizeClass = possiblyBetterSizeClass;

                if (Options::dumpSizeClasses())
                    dataLog("    Choosing size class: ", betterSizeClass, "\n");

                if (betterSizeClass == result->last()) {
                    // Defense for when the size class progression is so small that consecutive
                    // iterations produce the same size class.
                    continue;
                }

                // This is usually how we get out of the loop.
                if (betterSizeClass > MarkedSpace::largeCutoff
                    || betterSizeClass > Options::largeAllocationCutoff())
                    break;

                add(betterSizeClass);
            }

            if (Options::dumpSizeClasses())
                dataLog("JSC Heap MarkedSpace size class dump: ", listDump(*result), "\n");

            // We have an optimization in MarkedSpace::optimalSizeFor() that assumes things about
            // the size class table. This checks our results against that function's assumptions.
            for (size_t size = MarkedSpace::sizeStep, i = 0; size <= MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep, i++)
                RELEASE_ASSERT(result->at(i) == size);
        });
    return *result;
}

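// Builds a table indexed by size step (MarkedSpace::sizeClassToIndex()) in which each index maps to the
// entry for the smallest size class that can hold an allocation of that size; indices past the largest
// size class get the defaultCons entry. For example (illustrative, assuming sizeStep == 16 and size
// classes 16, 32, 48, ...), requests of 17 through 32 bytes all map to the 32-byte size class's entry.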
template<typename TableType, typename SizeClassCons, typename DefaultCons>
void buildSizeClassTable(TableType& table, const SizeClassCons& cons, const DefaultCons& defaultCons)
{
    size_t nextIndex = 0;
    for (size_t sizeClass : sizeClasses()) {
        auto entry = cons(sizeClass);
        size_t index = MarkedSpace::sizeClassToIndex(sizeClass);
        for (size_t i = nextIndex; i <= index; ++i)
            table[i] = entry;
        nextIndex = index + 1;
    }
    for (size_t i = nextIndex; i < MarkedSpace::numSizeClasses; ++i)
        table[i] = defaultCons(MarkedSpace::indexToSizeClass(i));
}

} // anonymous namespace

void MarkedSpace::initializeSizeClassForStepSize()
{
    // We call this multiple times and we may call it simultaneously from multiple threads. That's
    // OK, since it always stores the same values into the table.

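    // Both constructors below simply return the size class itself, so s_sizeClassForSizeStep ends up
    // mapping each size step index to the smallest size class that fits a request of that size (and, past
    // the largest size class, to the request rounded up to a sizeStep boundary); this is the table that
    // the optimization in MarkedSpace::optimalSizeFor() mentioned in sizeClasses() relies on.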
    buildSizeClassTable(
        s_sizeClassForSizeStep,
        [&] (size_t sizeClass) -> size_t {
            return sizeClass;
        },
        [&] (size_t sizeClass) -> size_t {
            return sizeClass;
        });
}

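// Each subspace gets its own allocator table: every size step index maps to a MarkedAllocator for the
// matching size class, while indices past the largest size class map to null so that such requests fall
// through to the LargeAllocation path in allocate()/tryAllocate() below.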
MarkedSpace::MarkedSpace(Heap* heap)
    : m_heap(heap)
    , m_capacity(0)
    , m_isIterating(false)
{
    initializeSizeClassForStepSize();

    forEachSubspace(
        [&] (Subspace& subspace, AllocatorAttributes attributes) -> IterationStatus {
            subspace.attributes = attributes;

            buildSizeClassTable(
                subspace.allocatorForSizeStep,
                [&] (size_t sizeClass) -> MarkedAllocator* {
                    return subspace.bagOfAllocators.add(heap, this, sizeClass, attributes);
                },
                [&] (size_t) -> MarkedAllocator* {
                    return nullptr;
                });

            return IterationStatus::Continue;
        });
}

MarkedSpace::~MarkedSpace()
{
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            freeBlock(block);
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->destroy();
    ASSERT(!m_blocks.set().size());
}

void MarkedSpace::lastChanceToFinalize()
{
    stopAllocating();
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.lastChanceToFinalize();
            return IterationStatus::Continue;
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->lastChanceToFinalize();
}

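// When allocatorFor() returns null (the per-subspace table built in the constructor maps sizes beyond the
// largest size class to null), the request falls through to the LargeAllocation path.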
void* MarkedSpace::allocate(Subspace& subspace, size_t bytes)
{
    if (MarkedAllocator* allocator = allocatorFor(subspace, bytes))
        return allocator->allocate();
    return allocateLarge(subspace, bytes);
}

void* MarkedSpace::tryAllocate(Subspace& subspace, size_t bytes)
{
    if (MarkedAllocator* allocator = allocatorFor(subspace, bytes))
        return allocator->tryAllocate();
    return tryAllocateLarge(subspace, bytes);
}

void* MarkedSpace::allocateLarge(Subspace& subspace, size_t size)
{
    void* result = tryAllocateLarge(subspace, size);
    RELEASE_ASSERT(result);
    return result;
}

void* MarkedSpace::tryAllocateLarge(Subspace& subspace, size_t size)
{
    m_heap->collectIfNecessaryOrDefer();

    size = WTF::roundUpToMultipleOf<sizeStep>(size);
    LargeAllocation* allocation = LargeAllocation::tryCreate(*m_heap, size, subspace.attributes);
    if (!allocation)
        return nullptr;

    m_largeAllocations.append(allocation);
    m_heap->didAllocate(size);
    m_capacity += size;
    return allocation->cell();
}

void MarkedSpace::sweep()
{
    m_heap->sweeper()->willFinishSweeping();
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            block->sweep();
        });
}

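// Sweeps the large allocations from m_largeAllocationsNurseryOffsetForSweep onward: dead ones are destroyed
// and the survivors are compacted in place so the vector stays dense, after which the nursery offset is
// reset to the end of the vector.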
void MarkedSpace::sweepLargeAllocations()
{
    RELEASE_ASSERT(m_largeAllocationsNurseryOffset == m_largeAllocations.size());
    unsigned srcIndex = m_largeAllocationsNurseryOffsetForSweep;
    unsigned dstIndex = srcIndex;
    while (srcIndex < m_largeAllocations.size()) {
        LargeAllocation* allocation = m_largeAllocations[srcIndex++];
        allocation->sweep();
        if (allocation->isEmpty()) {
            m_capacity -= allocation->cellSize();
            allocation->destroy();
            continue;
        }
        m_largeAllocations[dstIndex++] = allocation;
    }
    m_largeAllocations.resize(dstIndex);
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();
}

void MarkedSpace::zombifySweep()
{
    if (Options::logGC())
        dataLog("Zombifying sweep...");
    m_heap->sweeper()->willFinishSweeping();
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            if (block->needsSweeping())
                block->sweep();
        });
}

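// Resets every allocator's allocation state, forgets the per-collection list of blocks with new objects,
// folds the freshly activated weak sets back into the main active list, and updates the large-allocation
// nursery offsets (sweep everything after a full collection, only the nursery after an eden collection).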
void MarkedSpace::resetAllocators()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.reset();
            return IterationStatus::Continue;
        });

    m_blocksWithNewObjects.clear();
    m_activeWeakSets.takeFrom(m_newActiveWeakSets);
    if (m_heap->operationInProgress() == EdenCollection)
        m_largeAllocationsNurseryOffsetForSweep = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsNurseryOffsetForSweep = 0;
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();
}

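// m_newActiveWeakSets holds the weak sets activated since the last collection (see addActiveWeakSet() and
// didAllocateInBlock()); those are always visited, while the older active weak sets only need visiting
// during a full collection.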
void MarkedSpace::visitWeakSets(HeapRootVisitor& heapRootVisitor)
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->visit(heapRootVisitor);
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->operationInProgress() == FullCollection)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::reapWeakSets()
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->reap();
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->operationInProgress() == FullCollection)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::stopAllocating()
{
    ASSERT(!isIterating());
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.stopAllocating();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::prepareForMarking()
{
    if (m_heap->operationInProgress() == EdenCollection)
        m_largeAllocationsOffsetForThisCollection = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsOffsetForThisCollection = 0;
    m_largeAllocationsForThisCollectionBegin = m_largeAllocations.begin() + m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionSize = m_largeAllocations.size() - m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionEnd = m_largeAllocations.end();
    RELEASE_ASSERT(m_largeAllocationsForThisCollectionEnd == m_largeAllocationsForThisCollectionBegin + m_largeAllocationsForThisCollectionSize);
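    // Keep this collection's large allocations sorted by address. Presumably this is so that other parts
    // of the heap (e.g. conservative scanning) can binary-search the range; nothing in this file depends
    // on the ordering.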
    std::sort(
        m_largeAllocationsForThisCollectionBegin, m_largeAllocationsForThisCollectionEnd,
        [&] (LargeAllocation* a, LargeAllocation* b) {
            return a < b;
        });
}

void MarkedSpace::resumeAllocating()
{
    ASSERT(isIterating());
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.resumeAllocating();
            return IterationStatus::Continue;
        });
    // Nothing to do for LargeAllocations.
}

bool MarkedSpace::isPagedOut(double deadline)
{
    bool result = false;
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            if (allocator.isPagedOut(deadline)) {
                result = true;
                return IterationStatus::Done;
            }
            return IterationStatus::Continue;
        });
    // FIXME: Consider taking LargeAllocations into account here.
    return result;
}

void MarkedSpace::freeBlock(MarkedBlock::Handle* block)
{
    block->allocator()->removeBlock(block);
    m_capacity -= MarkedBlock::blockSize;
    m_blocks.remove(&block->block());
    delete block;
}

void MarkedSpace::freeOrShrinkBlock(MarkedBlock::Handle* block)
{
    if (!block->isEmpty()) {
        block->shrink();
        return;
    }

    freeBlock(block);
}

void MarkedSpace::shrink()
{
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            freeOrShrinkBlock(block);
        });
    // For LargeAllocations, we do the moral equivalent in sweepLargeAllocations().
}

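// Clears the newly-allocated bits left over from this cycle: each allocator's last active block plus the
// large allocations belonging to this collection. The assertions below then check that no newly-allocated
// bits remain anywhere in the heap.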
void MarkedSpace::clearNewlyAllocated()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            if (MarkedBlock::Handle* block = allocator.takeLastActiveBlock())
                block->clearNewlyAllocated();
            return IterationStatus::Continue;
        });

    for (unsigned i = m_largeAllocationsOffsetForThisCollection; i < m_largeAllocations.size(); ++i)
        m_largeAllocations[i]->clearNewlyAllocated();

#if !ASSERT_DISABLED
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            ASSERT(!block->clearNewlyAllocated());
        });

    for (LargeAllocation* allocation : m_largeAllocations)
        ASSERT(!allocation->isNewlyAllocated());
#endif // !ASSERT_DISABLED
}

#ifndef NDEBUG
struct VerifyMarked : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock::Handle* block) const
    {
        if (block->needsFlip())
            return;
        switch (block->m_state) {
        case MarkedBlock::Marked:
            return;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
};
#endif

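// Advances the heap to the next marking version. An eden collection only touches the blocks that received
// new objects; a full collection bumps m_version so that each block's flipIfNecessary() fires lazily, flips
// the LargeAllocations eagerly, and only flips every block up front in the rare case where the version
// counter wraps around.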
void MarkedSpace::flip()
{
    if (m_heap->operationInProgress() == EdenCollection) {
        for (unsigned i = 0; i < m_blocksWithNewObjects.size(); ++i)
            m_blocksWithNewObjects[i]->flipForEdenCollection();
    } else {
        HeapVersion nextVersion = m_version + 1;
        if (UNLIKELY(nextVersion == initialVersion)) {
            // Oh no! Version wrap-around! We handle this by flipping all blocks. This happens
            // super rarely, probably never for most users.
            forEachBlock(
                [&] (MarkedBlock::Handle* handle) {
                    handle->flipIfNecessary();
                });
        }
        m_version = nextVersion; // Henceforth, flipIfNecessary() will trigger on all blocks.
        for (LargeAllocation* allocation : m_largeAllocations)
            allocation->flip();
    }

#ifndef NDEBUG
    VerifyMarked verifyFunctor;
    forEachBlock(verifyFunctor);
#endif
}

void MarkedSpace::willStartIterating()
{
    ASSERT(!isIterating());
    stopAllocating();
    m_isIterating = true;
}

void MarkedSpace::didFinishIterating()
{
    ASSERT(isIterating());
    resumeAllocating();
    m_isIterating = false;
}

size_t MarkedSpace::objectCount()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result++;
    }
    return result;
}

size_t MarkedSpace::size()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount() * block->cellSize();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result += allocation->cellSize();
    }
    return result;
}

size_t MarkedSpace::capacity()
{
    return m_capacity;
}

void MarkedSpace::addActiveWeakSet(WeakSet* weakSet)
{
    // We conservatively assume that the WeakSet should belong in the new set. In fact, some weak
    // sets might contain new weak handles even though they are tied to old objects. This slightly
    // increases the amount of scanning that an eden collection would have to do, but the effect
    // ought to be small.
    m_newActiveWeakSets.append(weakSet);
}

void MarkedSpace::didAddBlock(MarkedBlock::Handle* block)
{
    m_capacity += MarkedBlock::blockSize;
    m_blocks.add(&block->block());
}

void MarkedSpace::didAllocateInBlock(MarkedBlock::Handle* block)
{
    block->assertFlipped();
    m_blocksWithNewObjects.append(block);

    if (block->weakSet().isOnList()) {
        block->weakSet().remove();
        m_newActiveWeakSets.append(&block->weakSet());
    }
}

} // namespace JSC