MarkedBlock should know what objects are live during marking
Source/JavaScriptCore/heap/MarkedSpace.cpp
/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2016 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include "config.h"
#include "MarkedSpace.h"

#include "IncrementalSweeper.h"
#include "JSObject.h"
#include "JSCInlines.h"
#include "MarkedBlockInlines.h"
#include <wtf/ListDump.h>

namespace JSC {

std::array<size_t, MarkedSpace::numSizeClasses> MarkedSpace::s_sizeClassForSizeStep;

namespace {

const Vector<size_t>& sizeClasses()
{
    static Vector<size_t>* result;
    static std::once_flag once;
    std::call_once(
        once,
        [] {
            result = new Vector<size_t>();

            auto add = [&] (size_t sizeClass) {
                if (Options::dumpSizeClasses())
                    dataLog("Adding JSC MarkedSpace size class: ", sizeClass, "\n");
                // Perform some validation as we go.
                RELEASE_ASSERT(!(sizeClass % MarkedSpace::sizeStep));
                if (result->isEmpty())
                    RELEASE_ASSERT(sizeClass == MarkedSpace::sizeStep);
                else
                    RELEASE_ASSERT(sizeClass > result->last());
                result->append(sizeClass);
            };

            // This is a definition of the size classes in our GC. It must define all of the
            // size classes from sizeStep up to largeCutoff.

            // Have very precise size classes for the small stuff. This is a loop to make it easy to reduce
            // atomSize.
            for (size_t size = MarkedSpace::sizeStep; size < MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep)
                add(size);

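            // For example, if sizeStep were 16 and preciseCutoff were 80 (illustrative values;
            // the real constants live in MarkedSpace.h), the loop above would add the size
            // classes 16, 32, 48, and 64.
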
            // We want to make sure that the remaining size classes minimize internal fragmentation
            // (i.e. the wasted space at the tail end of a MarkedBlock) while growing roughly
            // exponentially, starting just above the precise size classes and going up to about
            // four cells per block.

            if (Options::dumpSizeClasses())
                dataLog("    Marked block payload size: ", static_cast<size_t>(MarkedSpace::blockPayload), "\n");

            for (unsigned i = 0; ; ++i) {
                double approximateSize = MarkedSpace::preciseCutoff * pow(Options::sizeClassProgression(), i);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as a double: ", approximateSize, "\n");

                size_t approximateSizeInBytes = static_cast<size_t>(approximateSize);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as bytes: ", approximateSizeInBytes, "\n");

                // Make sure that the computer did the math correctly.
                RELEASE_ASSERT(approximateSizeInBytes >= MarkedSpace::preciseCutoff);

                if (approximateSizeInBytes > MarkedSpace::largeCutoff)
                    break;

                size_t sizeClass =
                    WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(approximateSizeInBytes);

                if (Options::dumpSizeClasses())
                    dataLog("    Size class: ", sizeClass, "\n");

                // Optimize the size class so that there isn't any slop at the end of the block's
                // payload.
                unsigned cellsPerBlock = MarkedSpace::blockPayload / sizeClass;
                size_t possiblyBetterSizeClass = (MarkedSpace::blockPayload / cellsPerBlock) & ~(MarkedSpace::sizeStep - 1);

                if (Options::dumpSizeClasses())
                    dataLog("    Possibly better size class: ", possiblyBetterSizeClass, "\n");

                // The size class we just came up with is better than the other one if it reduces
                // total wastage assuming we only allocate cells of that size.
                size_t originalWastage = MarkedSpace::blockPayload - cellsPerBlock * sizeClass;
                size_t newWastage = (possiblyBetterSizeClass - sizeClass) * cellsPerBlock;
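                // Illustrative arithmetic with made-up numbers (the real blockPayload comes from
                // MarkedBlock's layout): if blockPayload were 1000, sizeStep 16, and sizeClass 144,
                // then cellsPerBlock = 6 and possiblyBetterSizeClass = (1000 / 6) & ~15 = 160.
                // originalWastage = 1000 - 6 * 144 = 136 bytes of slop at the end of the block;
                // newWastage = (160 - 144) * 6 = 96 bytes of per-cell padding. The padded size
                // class wastes less, so it would win below.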

                if (Options::dumpSizeClasses())
                    dataLog("    Original wastage: ", originalWastage, ", new wastage: ", newWastage, "\n");

                size_t betterSizeClass;
                if (newWastage > originalWastage)
                    betterSizeClass = sizeClass;
                else
                    betterSizeClass = possiblyBetterSizeClass;

                if (Options::dumpSizeClasses())
                    dataLog("    Choosing size class: ", betterSizeClass, "\n");

                if (betterSizeClass == result->last()) {
                    // Defense for when the size class progression is small.
                    continue;
                }

                // This is usually how we get out of the loop.
                if (betterSizeClass > MarkedSpace::largeCutoff
                    || betterSizeClass > Options::largeAllocationCutoff())
                    break;

                add(betterSizeClass);
            }

            if (Options::dumpSizeClasses())
                dataLog("JSC Heap MarkedSpace size class dump: ", listDump(*result), "\n");

            // We have an optimization in MarkedSpace::optimalSizeFor() that assumes things about
            // the size class table. This checks our results against that function's assumptions.
            for (size_t size = MarkedSpace::sizeStep, i = 0; size <= MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep, i++)
                RELEASE_ASSERT(result->at(i) == size);
        });
    return *result;
}

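// buildSizeClassTable() fills a table indexed by size step (see sizeClassToIndex()). For each
// size class, every index from the previous size class's index up to its own index gets that
// class's entry, so a lookup by index yields the entry for the smallest size class that can hold
// an object of that size. Indices past the largest size class get the default entry. For
// illustration (hypothetical numbers): with size classes {16, 32, 48, 80}, requests of 49 to 80
// bytes all land on the entry built for 80.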
template<typename TableType, typename SizeClassCons, typename DefaultCons>
void buildSizeClassTable(TableType& table, const SizeClassCons& cons, const DefaultCons& defaultCons)
{
    size_t nextIndex = 0;
    for (size_t sizeClass : sizeClasses()) {
        auto entry = cons(sizeClass);
        size_t index = MarkedSpace::sizeClassToIndex(sizeClass);
        for (size_t i = nextIndex; i <= index; ++i)
            table[i] = entry;
        nextIndex = index + 1;
    }
    for (size_t i = nextIndex; i < MarkedSpace::numSizeClasses; ++i)
        table[i] = defaultCons(MarkedSpace::indexToSizeClass(i));
}

} // anonymous namespace

void MarkedSpace::initializeSizeClassForStepSize()
{
    // We call this multiple times and we may call it simultaneously from multiple threads. That's
    // OK, since it always stores the same values into the table.

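    // Both constructors below just return the size class itself, so after this call
    // s_sizeClassForSizeStep[i] holds the smallest size class that fits an object of
    // indexToSizeClass(i) bytes, while indices past the largest size class simply hold
    // indexToSizeClass(i).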
    buildSizeClassTable(
        s_sizeClassForSizeStep,
        [&] (size_t sizeClass) -> size_t {
            return sizeClass;
        },
        [&] (size_t sizeClass) -> size_t {
            return sizeClass;
        });
}

MarkedSpace::MarkedSpace(Heap* heap)
    : m_heap(heap)
    , m_capacity(0)
    , m_isIterating(false)
{
    initializeSizeClassForStepSize();

    forEachSubspace(
        [&] (Subspace& subspace, AllocatorAttributes attributes) -> IterationStatus {
            subspace.attributes = attributes;

            buildSizeClassTable(
                subspace.allocatorForSizeStep,
                [&] (size_t sizeClass) -> MarkedAllocator* {
                    return subspace.bagOfAllocators.add(heap, this, sizeClass, attributes);
                },
                [&] (size_t) -> MarkedAllocator* {
                    return nullptr;
                });

            return IterationStatus::Continue;
        });

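    // Link every allocator from every subspace into one intrusive list. Each allocator is pushed
    // onto the front of the list, so walking m_firstAllocator visits allocators in the reverse of
    // the order they were created above.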
    MarkedAllocator* previous = nullptr;
    forEachSubspace(
        [&] (Subspace& subspace, AllocatorAttributes) -> IterationStatus {
            for (MarkedAllocator* allocator : subspace.bagOfAllocators) {
                allocator->setNextAllocator(previous);
                previous = allocator;
            }

            return IterationStatus::Continue;
        });
    m_firstAllocator = previous;
    m_allocatorForEmptyAllocation = previous;
}

MarkedSpace::~MarkedSpace()
{
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            freeBlock(block);
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->destroy();
    ASSERT(!m_blocks.set().size());
}

void MarkedSpace::lastChanceToFinalize()
{
    stopAllocating();
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.lastChanceToFinalize();
            return IterationStatus::Continue;
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->lastChanceToFinalize();
}

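// In the allocation entry points below, allocatorFor() yields null when the requested size has no
// MarkedAllocator (sizes beyond the built size classes map to the null default entries installed
// by buildSizeClassTable()), so such requests fall through to the LargeAllocation path.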
void* MarkedSpace::allocate(Subspace& subspace, size_t bytes)
{
    if (false)
        dataLog("Allocating ", bytes, " bytes in ", subspace.attributes, ".\n");
    if (MarkedAllocator* allocator = allocatorFor(subspace, bytes)) {
        void* result = allocator->allocate();
        return result;
    }
    return allocateLarge(subspace, nullptr, bytes);
}

void* MarkedSpace::allocate(Subspace& subspace, GCDeferralContext* deferralContext, size_t bytes)
{
    if (false)
        dataLog("Allocating ", bytes, " deferred bytes in ", subspace.attributes, ".\n");
    if (MarkedAllocator* allocator = allocatorFor(subspace, bytes)) {
        void* result = allocator->allocate(deferralContext);
        return result;
    }
    return allocateLarge(subspace, deferralContext, bytes);
}

void* MarkedSpace::tryAllocate(Subspace& subspace, size_t bytes)
{
    if (false)
        dataLog("Try-allocating ", bytes, " bytes in ", subspace.attributes, ".\n");
    if (MarkedAllocator* allocator = allocatorFor(subspace, bytes)) {
        void* result = allocator->tryAllocate();
        return result;
    }
    return tryAllocateLarge(subspace, nullptr, bytes);
}

void* MarkedSpace::tryAllocate(Subspace& subspace, GCDeferralContext* deferralContext, size_t bytes)
{
    if (false)
        dataLog("Try-allocating ", bytes, " deferred bytes in ", subspace.attributes, ".\n");
    if (MarkedAllocator* allocator = allocatorFor(subspace, bytes)) {
        void* result = allocator->tryAllocate(deferralContext);
        return result;
    }
    return tryAllocateLarge(subspace, deferralContext, bytes);
}

void* MarkedSpace::allocateLarge(Subspace& subspace, GCDeferralContext* deferralContext, size_t size)
{
    void* result = tryAllocateLarge(subspace, deferralContext, size);
    RELEASE_ASSERT(result);
    return result;
}

void* MarkedSpace::tryAllocateLarge(Subspace& subspace, GCDeferralContext* deferralContext, size_t size)
{
    m_heap->collectIfNecessaryOrDefer(deferralContext);

    size = WTF::roundUpToMultipleOf<sizeStep>(size);
    LargeAllocation* allocation = LargeAllocation::tryCreate(*m_heap, size, subspace.attributes);
    if (!allocation)
        return nullptr;

    m_largeAllocations.append(allocation);
    m_heap->didAllocate(size);
    m_capacity += size;
    return allocation->cell();
}

void MarkedSpace::sweep()
{
    m_heap->sweeper()->willFinishSweeping();
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.sweep();
            return IterationStatus::Continue;
        });
}

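// Sweeps the large allocations, destroying the ones that are no longer live and compacting the
// survivors toward the front of m_largeAllocations in place (srcIndex reads, dstIndex writes).
// During an eden collection the sweep starts at m_largeAllocationsNurseryOffsetForSweep, so older
// large allocations are left untouched.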
void MarkedSpace::sweepLargeAllocations()
{
    RELEASE_ASSERT(m_largeAllocationsNurseryOffset == m_largeAllocations.size());
    unsigned srcIndex = m_largeAllocationsNurseryOffsetForSweep;
    unsigned dstIndex = srcIndex;
    while (srcIndex < m_largeAllocations.size()) {
        LargeAllocation* allocation = m_largeAllocations[srcIndex++];
        allocation->sweep();
        if (allocation->isEmpty()) {
            m_capacity -= allocation->cellSize();
            allocation->destroy();
            continue;
        }
        m_largeAllocations[dstIndex++] = allocation;
    }
    m_largeAllocations.resize(dstIndex);
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();
}

void MarkedSpace::prepareForAllocation()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.prepareForAllocation();
            return IterationStatus::Continue;
        });

    m_activeWeakSets.takeFrom(m_newActiveWeakSets);

    if (m_heap->operationInProgress() == EdenCollection)
        m_largeAllocationsNurseryOffsetForSweep = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsNurseryOffsetForSweep = 0;
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();

    m_allocatorForEmptyAllocation = m_firstAllocator;
}

void MarkedSpace::visitWeakSets(HeapRootVisitor& heapRootVisitor)
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->visit(heapRootVisitor);
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->operationInProgress() == FullCollection)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::reapWeakSets()
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->reap();
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->operationInProgress() == FullCollection)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::stopAllocating()
{
    ASSERT(!isIterating());
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.stopAllocating();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::prepareForMarking()
{
    if (m_heap->operationInProgress() == EdenCollection)
        m_largeAllocationsOffsetForThisCollection = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsOffsetForThisCollection = 0;
    m_largeAllocationsForThisCollectionBegin = m_largeAllocations.begin() + m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionSize = m_largeAllocations.size() - m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionEnd = m_largeAllocations.end();
    RELEASE_ASSERT(m_largeAllocationsForThisCollectionEnd == m_largeAllocationsForThisCollectionBegin + m_largeAllocationsForThisCollectionSize);
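    // Sort the large allocations for this collection by address. This presumably lets other code
    // (e.g. conservative root scanning) binary-search the range when deciding whether an
    // arbitrary pointer refers to a live large allocation.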
    std::sort(
        m_largeAllocationsForThisCollectionBegin, m_largeAllocationsForThisCollectionEnd,
        [&] (LargeAllocation* a, LargeAllocation* b) {
            return a < b;
        });
}

void MarkedSpace::resumeAllocating()
{
    ASSERT(isIterating());
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.resumeAllocating();
            return IterationStatus::Continue;
        });
    // Nothing to do for LargeAllocations.
}

bool MarkedSpace::isPagedOut(double deadline)
{
    bool result = false;
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            if (allocator.isPagedOut(deadline)) {
                result = true;
                return IterationStatus::Done;
            }
            return IterationStatus::Continue;
        });
    // FIXME: Consider taking LargeAllocations into account here.
    return result;
}

void MarkedSpace::freeBlock(MarkedBlock::Handle* block)
{
    block->allocator()->removeBlock(block);
    m_capacity -= MarkedBlock::blockSize;
    m_blocks.remove(&block->block());
    delete block;
}

void MarkedSpace::freeOrShrinkBlock(MarkedBlock::Handle* block)
{
    if (!block->isEmpty()) {
        block->shrink();
        return;
    }

    freeBlock(block);
}

void MarkedSpace::shrink()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.shrink();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::beginMarking()
{
    if (m_heap->operationInProgress() == FullCollection) {
        forEachAllocator(
            [&] (MarkedAllocator& allocator) -> IterationStatus {
                allocator.beginMarkingForFullCollection();
                return IterationStatus::Continue;
            });

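        // Mark bits are validated lazily against a version number. When the version is about to
        // wrap back around to initialVersion, every block's marks must be reset eagerly so that
        // stale mark bits from a long-ago collection cannot be mistaken for current ones.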
        if (UNLIKELY(nextVersion(m_markingVersion) == initialVersion)) {
            forEachBlock(
                [&] (MarkedBlock::Handle* handle) {
                    handle->block().resetMarks();
                });
        }

        m_markingVersion = nextVersion(m_markingVersion);

        for (LargeAllocation* allocation : m_largeAllocations)
            allocation->flip();
    }

    if (!ASSERT_DISABLED) {
        forEachBlock(
            [&] (MarkedBlock::Handle* block) {
                if (block->areMarksStale())
                    return;
                ASSERT(!block->isFreeListed());
            });
    }

    m_isMarking = true;
}

void MarkedSpace::endMarking()
{
    if (UNLIKELY(nextVersion(m_newlyAllocatedVersion) == initialVersion)) {
        forEachBlock(
            [&] (MarkedBlock::Handle* handle) {
                handle->resetAllocated();
            });
    }

    m_newlyAllocatedVersion = nextVersion(m_newlyAllocatedVersion);

    for (unsigned i = m_largeAllocationsOffsetForThisCollection; i < m_largeAllocations.size(); ++i)
        m_largeAllocations[i]->clearNewlyAllocated();

    if (!ASSERT_DISABLED) {
        for (LargeAllocation* allocation : m_largeAllocations)
            ASSERT_UNUSED(allocation, !allocation->isNewlyAllocated());
    }

    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.endMarking();
            return IterationStatus::Continue;
        });

    m_isMarking = false;
}

void MarkedSpace::willStartIterating()
{
    ASSERT(!isIterating());
    stopAllocating();
    m_isIterating = true;
}

void MarkedSpace::didFinishIterating()
{
    ASSERT(isIterating());
    resumeAllocating();
    m_isIterating = false;
}

size_t MarkedSpace::objectCount()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result++;
    }
    return result;
}

size_t MarkedSpace::size()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount() * block->cellSize();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result += allocation->cellSize();
    }
    return result;
}

size_t MarkedSpace::capacity()
{
    return m_capacity;
}

void MarkedSpace::addActiveWeakSet(WeakSet* weakSet)
{
    // We conservatively assume that the WeakSet should belong in the new set. In fact, some weak
    // sets might contain new weak handles even though they are tied to old objects. This slightly
    // increases the amount of scanning that an eden collection would have to do, but the effect
    // ought to be small.
    m_newActiveWeakSets.append(weakSet);
}

void MarkedSpace::didAddBlock(MarkedBlock::Handle* block)
{
    // WARNING: This function is called before block is fully initialized. The block will not know
    // its cellSize() or attributes(). The latter implies that you can't ask things like
    // needsDestruction().
    m_capacity += MarkedBlock::blockSize;
    m_blocks.add(&block->block());
}

void MarkedSpace::didAllocateInBlock(MarkedBlock::Handle* block)
{
    if (block->weakSet().isOnList()) {
        block->weakSet().remove();
        m_newActiveWeakSets.append(&block->weakSet());
    }
}

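// m_allocatorForEmptyAllocation acts as a cursor that persists across calls: the search for an
// empty block to steal resumes at the allocator where the previous search stopped, and the
// cursor is reset to m_firstAllocator in prepareForAllocation().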
MarkedBlock::Handle* MarkedSpace::findEmptyBlockToSteal()
{
    for (; m_allocatorForEmptyAllocation; m_allocatorForEmptyAllocation = m_allocatorForEmptyAllocation->nextAllocator()) {
        if (MarkedBlock::Handle* block = m_allocatorForEmptyAllocation->findEmptyBlockToSteal())
            return block;
    }
    return nullptr;
}

void MarkedSpace::snapshotUnswept()
{
    if (m_heap->operationInProgress() == EdenCollection) {
        forEachAllocator(
            [&] (MarkedAllocator& allocator) -> IterationStatus {
                allocator.snapshotUnsweptForEdenCollection();
                return IterationStatus::Continue;
            });
    } else {
        forEachAllocator(
            [&] (MarkedAllocator& allocator) -> IterationStatus {
                allocator.snapshotUnsweptForFullCollection();
                return IterationStatus::Continue;
            });
    }
}

void MarkedSpace::assertNoUnswept()
{
    if (ASSERT_DISABLED)
        return;
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.assertNoUnswept();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::dumpBits(PrintStream& out)
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            out.print("Bits for ", allocator, ":\n");
            allocator.dumpBits(out);
            return IterationStatus::Continue;
        });
}

} // namespace JSC