/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2016 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include "config.h"
#include "MarkedSpace.h"

#include "FunctionCodeBlock.h"
#include "IncrementalSweeper.h"
#include "JSObject.h"
#include "JSCInlines.h"
#include "MarkedBlockInlines.h"
#include <wtf/ListDump.h>

namespace JSC {

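// Storage for the static size-step -> size-class lookup table declared in MarkedSpace.h. Each
// size-step index maps to the size class used for allocations of that size; the table is filled in
// by initializeSizeClassForStepSize() below.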
std::array<size_t, MarkedSpace::numSizeClasses> MarkedSpace::s_sizeClassForSizeStep;

namespace {

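// Computes the full list of size classes, sorted and deduplicated. The list is built exactly once
// under std::call_once and is never freed, so callers can hold on to the returned reference for the
// lifetime of the process.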
const Vector<size_t>& sizeClasses()
{
    static Vector<size_t>* result;
    static std::once_flag once;
    std::call_once(
        once,
        [] {
            result = new Vector<size_t>();

            auto add = [&] (size_t sizeClass) {
                sizeClass = WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(sizeClass);
                if (Options::dumpSizeClasses())
                    dataLog("Adding JSC MarkedSpace size class: ", sizeClass, "\n");
                // Perform some validation as we go.
                RELEASE_ASSERT(!(sizeClass % MarkedSpace::sizeStep));
                if (result->isEmpty())
                    RELEASE_ASSERT(sizeClass == MarkedSpace::sizeStep);
                result->append(sizeClass);
            };

            // This is a definition of the size classes in our GC. It must define all of the
            // size classes from sizeStep up to largeCutoff.

            // Have very precise size classes for the small stuff. This is a loop to make it easy to reduce
            // atomSize.
            for (size_t size = MarkedSpace::sizeStep; size < MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep)
                add(size);

            // We want the remaining size classes to minimize internal fragmentation (i.e. the wasted
            // space at the tail end of a MarkedBlock) while growing roughly exponentially, starting
            // just above the precise size classes and ending around four cells per block.

            if (Options::dumpSizeClasses())
                dataLog("    Marked block payload size: ", static_cast<size_t>(MarkedSpace::blockPayload), "\n");

            for (unsigned i = 0; ; ++i) {
                double approximateSize = MarkedSpace::preciseCutoff * pow(Options::sizeClassProgression(), i);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as a double: ", approximateSize, "\n");

                size_t approximateSizeInBytes = static_cast<size_t>(approximateSize);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as bytes: ", approximateSizeInBytes, "\n");

                // Make sure that the computer did the math correctly.
                RELEASE_ASSERT(approximateSizeInBytes >= MarkedSpace::preciseCutoff);

                if (approximateSizeInBytes > MarkedSpace::largeCutoff)
                    break;

                size_t sizeClass =
                    WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(approximateSizeInBytes);

                if (Options::dumpSizeClasses())
                    dataLog("    Size class: ", sizeClass, "\n");

                // Optimize the size class so that there isn't any slop at the end of the block's
                // payload.
                unsigned cellsPerBlock = MarkedSpace::blockPayload / sizeClass;
                size_t possiblyBetterSizeClass = (MarkedSpace::blockPayload / cellsPerBlock) & ~(MarkedSpace::sizeStep - 1);

                if (Options::dumpSizeClasses())
                    dataLog("    Possibly better size class: ", possiblyBetterSizeClass, "\n");

                // The size class we just came up with is better than the other one if it reduces
                // total wastage assuming we only allocate cells of that size.
                size_t originalWastage = MarkedSpace::blockPayload - cellsPerBlock * sizeClass;
                size_t newWastage = (possiblyBetterSizeClass - sizeClass) * cellsPerBlock;

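                // Illustrative example (the numbers are hypothetical, not the real constants, and
                // assume sizeStep == 16): with a block payload of 16000 bytes and sizeClass = 1104,
                // cellsPerBlock = 14 and possiblyBetterSizeClass = (16000 / 14) & ~15 = 1136.
                // originalWastage is 16000 - 14 * 1104 = 544, while newWastage is
                // (1136 - 1104) * 14 = 448, so the rounded-up class 1136 wins below.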
                if (Options::dumpSizeClasses())
                    dataLog("    Original wastage: ", originalWastage, ", new wastage: ", newWastage, "\n");

                size_t betterSizeClass;
                if (newWastage > originalWastage)
                    betterSizeClass = sizeClass;
                else
                    betterSizeClass = possiblyBetterSizeClass;

                if (Options::dumpSizeClasses())
                    dataLog("    Choosing size class: ", betterSizeClass, "\n");

                if (betterSizeClass == result->last()) {
                    // Defense for when the size class progression is small enough that consecutive
                    // steps round to the same size class.
                    continue;
                }

                // This is usually how we get out of the loop.
                if (betterSizeClass > MarkedSpace::largeCutoff
                    || betterSizeClass > Options::largeAllocationCutoff())
                    break;

                add(betterSizeClass);
            }

            add(sizeof(UnlinkedFunctionExecutable));
            add(sizeof(UnlinkedFunctionCodeBlock));
            add(sizeof(FunctionExecutable));
            add(sizeof(FunctionCodeBlock));

            {
                // Sort and deduplicate.
                std::sort(result->begin(), result->end());
                auto it = std::unique(result->begin(), result->end());
                result->shrinkCapacity(it - result->begin());
            }

            if (Options::dumpSizeClasses())
                dataLog("JSC Heap MarkedSpace size class dump: ", listDump(*result), "\n");

            // We have an optimization in MarkedSpace::optimalSizeFor() that assumes things about
            // the size class table. This checks our results against that function's assumptions.
            for (size_t size = MarkedSpace::sizeStep, i = 0; size <= MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep, i++)
                RELEASE_ASSERT(result->at(i) == size);
        });
    return *result;
}

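// Fills `table` so that every size-step index up to and including the index of a size class maps to
// the entry constructed for that size class (i.e. the smallest size class that can hold that step's
// size). Indices beyond the largest size class fall through to defaultCons.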
template<typename TableType, typename SizeClassCons, typename DefaultCons>
void buildSizeClassTable(TableType& table, const SizeClassCons& cons, const DefaultCons& defaultCons)
{
    size_t nextIndex = 0;
    for (size_t sizeClass : sizeClasses()) {
        auto entry = cons(sizeClass);
        size_t index = MarkedSpace::sizeClassToIndex(sizeClass);
        for (size_t i = nextIndex; i <= index; ++i)
            table[i] = entry;
        nextIndex = index + 1;
    }
    for (size_t i = nextIndex; i < MarkedSpace::numSizeClasses; ++i)
        table[i] = defaultCons(MarkedSpace::indexToSizeClass(i));
}

} // anonymous namespace

void MarkedSpace::initializeSizeClassForStepSize()
{
    // We call this multiple times and we may call it simultaneously from multiple threads. That's
    // OK, since it always stores the same values into the table.

    buildSizeClassTable(
        s_sizeClassForSizeStep,
        [&] (size_t sizeClass) -> size_t {
            return sizeClass;
        },
        [&] (size_t sizeClass) -> size_t {
            return sizeClass;
        });
}

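// Construction happens in two passes over the subspaces: the first builds each subspace's
// per-size-class MarkedAllocator table, and the second chains every allocator into a single list
// (in reverse visitation order) rooted at m_firstAllocator, which findEmptyBlockToSteal() below
// walks via nextAllocator().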
MarkedSpace::MarkedSpace(Heap* heap)
    : m_heap(heap)
    , m_capacity(0)
    , m_isIterating(false)
{
    initializeSizeClassForStepSize();

    forEachSubspace(
        [&] (Subspace& subspace, AllocatorAttributes attributes) -> IterationStatus {
            subspace.attributes = attributes;

            buildSizeClassTable(
                subspace.allocatorForSizeStep,
                [&] (size_t sizeClass) -> MarkedAllocator* {
                    return subspace.bagOfAllocators.add(heap, this, sizeClass, attributes);
                },
                [&] (size_t) -> MarkedAllocator* {
                    return nullptr;
                });

            return IterationStatus::Continue;
        });

    MarkedAllocator* previous = nullptr;
    forEachSubspace(
        [&] (Subspace& subspace, AllocatorAttributes) -> IterationStatus {
            for (MarkedAllocator* allocator : subspace.bagOfAllocators) {
                allocator->setNextAllocator(previous);
                previous = allocator;
            }

            return IterationStatus::Continue;
        });
    m_firstAllocator = previous;
    m_allocatorForEmptyAllocation = previous;
}

MarkedSpace::~MarkedSpace()
{
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            freeBlock(block);
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->destroy();
    ASSERT(!m_blocks.set().size());
}

void MarkedSpace::lastChanceToFinalize()
{
    stopAllocating();
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.lastChanceToFinalize();
            return IterationStatus::Continue;
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->lastChanceToFinalize();
}

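// The allocate/tryAllocate entry points below all have the same shape: if the requested size fits
// one of the per-size-class allocators (allocatorFor() returns non-null), allocate out of a
// MarkedBlock; otherwise fall back to the LargeAllocation path, where each object gets its own
// allocation.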
void* MarkedSpace::allocate(Subspace& subspace, size_t bytes)
{
    if (false)
        dataLog("Allocating ", bytes, " bytes in ", subspace.attributes, ".\n");
    if (MarkedAllocator* allocator = allocatorFor(subspace, bytes)) {
        void* result = allocator->allocate();
        return result;
    }
    return allocateLarge(subspace, nullptr, bytes);
}

void* MarkedSpace::allocate(Subspace& subspace, GCDeferralContext* deferralContext, size_t bytes)
{
    if (false)
        dataLog("Allocating ", bytes, " deferred bytes in ", subspace.attributes, ".\n");
    if (MarkedAllocator* allocator = allocatorFor(subspace, bytes)) {
        void* result = allocator->allocate(deferralContext);
        return result;
    }
    return allocateLarge(subspace, deferralContext, bytes);
}

void* MarkedSpace::tryAllocate(Subspace& subspace, size_t bytes)
{
    if (false)
        dataLog("Try-allocating ", bytes, " bytes in ", subspace.attributes, ".\n");
    if (MarkedAllocator* allocator = allocatorFor(subspace, bytes)) {
        void* result = allocator->tryAllocate();
        return result;
    }
    return tryAllocateLarge(subspace, nullptr, bytes);
}

void* MarkedSpace::tryAllocate(Subspace& subspace, GCDeferralContext* deferralContext, size_t bytes)
{
    if (false)
        dataLog("Try-allocating ", bytes, " deferred bytes in ", subspace.attributes, ".\n");
    if (MarkedAllocator* allocator = allocatorFor(subspace, bytes)) {
        void* result = allocator->tryAllocate(deferralContext);
        return result;
    }
    return tryAllocateLarge(subspace, deferralContext, bytes);
}

void* MarkedSpace::allocateLarge(Subspace& subspace, GCDeferralContext* deferralContext, size_t size)
{
    void* result = tryAllocateLarge(subspace, deferralContext, size);
    RELEASE_ASSERT(result);
    return result;
}

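// May trigger or defer a collection via collectIfNecessaryOrDefer(). The request is rounded up to a
// multiple of sizeStep and accounted for in m_capacity; the caller gets the cell payload of the new
// LargeAllocation, or null on failure.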
void* MarkedSpace::tryAllocateLarge(Subspace& subspace, GCDeferralContext* deferralContext, size_t size)
{
    m_heap->collectIfNecessaryOrDefer(deferralContext);

    size = WTF::roundUpToMultipleOf<sizeStep>(size);
    LargeAllocation* allocation = LargeAllocation::tryCreate(*m_heap, size, subspace.attributes);
    if (!allocation)
        return nullptr;

    m_largeAllocations.append(allocation);
    m_heap->didAllocate(size);
    m_capacity += size;
    return allocation->cell();
}

void MarkedSpace::sweep()
{
    m_heap->sweeper()->willFinishSweeping();
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.sweep();
            return IterationStatus::Continue;
        });
}

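// Sweeps the large allocations starting at the offset captured in prepareForAllocation(), destroying
// the ones that are now empty and compacting the survivors in place so that m_largeAllocations stays
// dense.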
void MarkedSpace::sweepLargeAllocations()
{
    RELEASE_ASSERT(m_largeAllocationsNurseryOffset == m_largeAllocations.size());
    unsigned srcIndex = m_largeAllocationsNurseryOffsetForSweep;
    unsigned dstIndex = srcIndex;
    while (srcIndex < m_largeAllocations.size()) {
        LargeAllocation* allocation = m_largeAllocations[srcIndex++];
        allocation->sweep();
        if (allocation->isEmpty()) {
            m_capacity -= allocation->cellSize();
            allocation->destroy();
            continue;
        }
        m_largeAllocations[dstIndex++] = allocation;
    }
    m_largeAllocations.resize(dstIndex);
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();
}

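// Resets allocator state for the next allocation cycle. For an eden collection only the nursery tail
// of m_largeAllocations needs sweeping, so m_largeAllocationsNurseryOffsetForSweep is positioned
// accordingly; a full collection sweeps all large allocations from the start.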
void MarkedSpace::prepareForAllocation()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.prepareForAllocation();
            return IterationStatus::Continue;
        });

    m_activeWeakSets.takeFrom(m_newActiveWeakSets);

    if (m_heap->collectionScope() == CollectionScope::Eden)
        m_largeAllocationsNurseryOffsetForSweep = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsNurseryOffsetForSweep = 0;
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();

    m_allocatorForEmptyAllocation = m_firstAllocator;
}

void MarkedSpace::visitWeakSets(SlotVisitor& visitor)
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->visit(visitor);
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->collectionScope() == CollectionScope::Full)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::reapWeakSets()
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->reap();
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->collectionScope() == CollectionScope::Full)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::stopAllocating()
{
    ASSERT(!isIterating());
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.stopAllocating();
            return IterationStatus::Continue;
        });
}

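// Records the range of large allocations that belong to this collection and sorts it by address so
// the conservative scan can search it efficiently when deciding whether an arbitrary pointer refers
// to a LargeAllocation.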
void MarkedSpace::prepareForConservativeScan()
{
    m_largeAllocationsForThisCollectionBegin = m_largeAllocations.begin() + m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionSize = m_largeAllocations.size() - m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionEnd = m_largeAllocations.end();
    RELEASE_ASSERT(m_largeAllocationsForThisCollectionEnd == m_largeAllocationsForThisCollectionBegin + m_largeAllocationsForThisCollectionSize);

    std::sort(
        m_largeAllocationsForThisCollectionBegin, m_largeAllocationsForThisCollectionEnd,
        [&] (LargeAllocation* a, LargeAllocation* b) {
            return a < b;
        });
}

void MarkedSpace::prepareForMarking()
{
    if (m_heap->collectionScope() == CollectionScope::Eden)
        m_largeAllocationsOffsetForThisCollection = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsOffsetForThisCollection = 0;
}

void MarkedSpace::resumeAllocating()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.resumeAllocating();
            return IterationStatus::Continue;
        });
    // Nothing to do for LargeAllocations.
}

bool MarkedSpace::isPagedOut(double deadline)
{
    bool result = false;
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            if (allocator.isPagedOut(deadline)) {
                result = true;
                return IterationStatus::Done;
            }
            return IterationStatus::Continue;
        });
    // FIXME: Consider taking LargeAllocations into account here.
    return result;
}

void MarkedSpace::freeBlock(MarkedBlock::Handle* block)
{
    block->allocator()->removeBlock(block);
    m_capacity -= MarkedBlock::blockSize;
    m_blocks.remove(&block->block());
    delete block;
}

void MarkedSpace::freeOrShrinkBlock(MarkedBlock::Handle* block)
{
    if (!block->isEmpty()) {
        block->shrink();
        return;
    }

    freeBlock(block);
}

void MarkedSpace::shrink()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.shrink();
            return IterationStatus::Continue;
        });
}

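// Mark bits are versioned rather than cleared eagerly: a full collection just bumps
// m_markingVersion, which makes every block's existing marks stale. Blocks are only walked to reset
// their marks in the rare case where the version counter wraps back to initialVersion.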
void MarkedSpace::beginMarking()
{
    if (m_heap->collectionScope() == CollectionScope::Full) {
        forEachAllocator(
            [&] (MarkedAllocator& allocator) -> IterationStatus {
                allocator.beginMarkingForFullCollection();
                return IterationStatus::Continue;
            });

        if (UNLIKELY(nextVersion(m_markingVersion) == initialVersion)) {
            forEachBlock(
                [&] (MarkedBlock::Handle* handle) {
                    handle->block().resetMarks();
                });
        }

        m_markingVersion = nextVersion(m_markingVersion);

        for (LargeAllocation* allocation : m_largeAllocations)
            allocation->flip();
    }

    if (!ASSERT_DISABLED) {
        forEachBlock(
            [&] (MarkedBlock::Handle* block) {
                if (block->areMarksStale())
                    return;
                ASSERT(!block->isFreeListed());
            });
    }

    m_isMarking = true;
}

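// The newly-allocated bits are versioned the same way as the mark bits: bumping
// m_newlyAllocatedVersion invalidates them wholesale, and blocks are only visited to reset the bits
// when the version counter wraps.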
void MarkedSpace::endMarking()
{
    if (UNLIKELY(nextVersion(m_newlyAllocatedVersion) == initialVersion)) {
        forEachBlock(
            [&] (MarkedBlock::Handle* handle) {
                handle->resetAllocated();
            });
    }

    m_newlyAllocatedVersion = nextVersion(m_newlyAllocatedVersion);

    for (unsigned i = m_largeAllocationsOffsetForThisCollection; i < m_largeAllocations.size(); ++i)
        m_largeAllocations[i]->clearNewlyAllocated();

    if (!ASSERT_DISABLED) {
        for (LargeAllocation* allocation : m_largeAllocations)
            ASSERT_UNUSED(allocation, !allocation->isNewlyAllocated());
    }

    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.endMarking();
            return IterationStatus::Continue;
        });

    m_isMarking = false;
}

void MarkedSpace::willStartIterating()
{
    ASSERT(!isIterating());
    stopAllocating();
    m_isIterating = true;
}

void MarkedSpace::didFinishIterating()
{
    ASSERT(isIterating());
    resumeAllocating();
    m_isIterating = false;
}

size_t MarkedSpace::objectCount()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result++;
    }
    return result;
}

size_t MarkedSpace::size()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount() * block->cellSize();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result += allocation->cellSize();
    }
    return result;
}

size_t MarkedSpace::capacity()
{
    return m_capacity;
}

void MarkedSpace::addActiveWeakSet(WeakSet* weakSet)
{
    // We conservatively assume that the WeakSet should belong in the new set. In fact, some weak
    // sets might contain new weak handles even though they are tied to old objects. This slightly
    // increases the amount of scanning that an eden collection would have to do, but the effect
    // ought to be small.
    m_newActiveWeakSets.append(weakSet);
}

void MarkedSpace::didAddBlock(MarkedBlock::Handle* block)
{
    // WARNING: This function is called before block is fully initialized. The block will not know
    // its cellSize() or attributes(). The latter implies that you can't ask things like
    // needsDestruction().
    m_capacity += MarkedBlock::blockSize;
    m_blocks.add(&block->block());
}

void MarkedSpace::didAllocateInBlock(MarkedBlock::Handle* block)
{
    if (block->weakSet().isOnList()) {
        block->weakSet().remove();
        m_newActiveWeakSets.append(&block->weakSet());
    }
}

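// Walks the allocator list looking for an empty block that can be repurposed. The cursor
// m_allocatorForEmptyAllocation persists across calls (and is reset in prepareForAllocation()), so
// repeated steals resume where the previous search left off rather than rescanning from the front.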
MarkedBlock::Handle* MarkedSpace::findEmptyBlockToSteal()
{
    for (; m_allocatorForEmptyAllocation; m_allocatorForEmptyAllocation = m_allocatorForEmptyAllocation->nextAllocator()) {
        if (MarkedBlock::Handle* block = m_allocatorForEmptyAllocation->findEmptyBlockToSteal())
            return block;
    }
    return nullptr;
}

void MarkedSpace::snapshotUnswept()
{
    if (m_heap->collectionScope() == CollectionScope::Eden) {
        forEachAllocator(
            [&] (MarkedAllocator& allocator) -> IterationStatus {
                allocator.snapshotUnsweptForEdenCollection();
                return IterationStatus::Continue;
            });
    } else {
        forEachAllocator(
            [&] (MarkedAllocator& allocator) -> IterationStatus {
                allocator.snapshotUnsweptForFullCollection();
                return IterationStatus::Continue;
            });
    }
}

void MarkedSpace::assertNoUnswept()
{
    if (ASSERT_DISABLED)
        return;
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.assertNoUnswept();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::dumpBits(PrintStream& out)
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            out.print("Bits for ", allocator, ":\n");
            allocator.dumpBits(out);
            return IterationStatus::Continue;
        });
}

} // namespace JSC