/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2016 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include "config.h"
#include "MarkedSpace.h"

#include "IncrementalSweeper.h"
#include "JSObject.h"
#include "JSCInlines.h"
#include "MarkedBlockInlines.h"
#include "SuperSampler.h"
#include <wtf/ListDump.h>

namespace JSC {

std::array<size_t, MarkedSpace::numSizeClasses> MarkedSpace::s_sizeClassForSizeStep;

namespace {

const Vector<size_t>& sizeClasses()
{
    static Vector<size_t>* result;
    static std::once_flag once;
    std::call_once(
        once,
        [] {
            result = new Vector<size_t>();

            auto add = [&] (size_t sizeClass) {
                if (Options::dumpSizeClasses())
                    dataLog("Adding JSC MarkedSpace size class: ", sizeClass, "\n");
                // Perform some validation as we go.
                RELEASE_ASSERT(!(sizeClass % MarkedSpace::sizeStep));
                if (result->isEmpty())
                    RELEASE_ASSERT(sizeClass == MarkedSpace::sizeStep);
                else
                    RELEASE_ASSERT(sizeClass > result->last());
                result->append(sizeClass);
            };

            // This is a definition of the size classes in our GC. It must define all of the
            // size classes from sizeStep up to largeCutoff.

            // Have very precise size classes for the small stuff. This is a loop to make it easy to reduce
            // atomSize.
            for (size_t size = MarkedSpace::sizeStep; size < MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep)
                add(size);

            // We want the remaining size classes to minimize internal fragmentation (i.e. the wasted
            // space at the tail end of a MarkedBlock) while growing roughly exponentially, starting
            // just above the precise size classes and ending around four cells per block.
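            // Illustrative sketch only (the real values come from MarkedSpace.h and Options and may
            // differ): with sizeStep = 16, preciseCutoff = 80, and sizeClassProgression = 1.4, the
            // loop below would consider candidates of roughly 80, 112, 160, 224, 320, ... bytes,
            // rounding each up to a multiple of sizeStep before the per-block adjustment further down.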

            if (Options::dumpSizeClasses())
                dataLog("    Marked block payload size: ", static_cast<size_t>(MarkedSpace::blockPayload), "\n");

            for (unsigned i = 0; ; ++i) {
                double approximateSize = MarkedSpace::preciseCutoff * pow(Options::sizeClassProgression(), i);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as a double: ", approximateSize, "\n");

                size_t approximateSizeInBytes = static_cast<size_t>(approximateSize);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as bytes: ", approximateSizeInBytes, "\n");

                // Make sure that the computer did the math correctly.
                RELEASE_ASSERT(approximateSizeInBytes >= MarkedSpace::preciseCutoff);

                if (approximateSizeInBytes > MarkedSpace::largeCutoff)
                    break;

                size_t sizeClass =
                    WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(approximateSizeInBytes);

                if (Options::dumpSizeClasses())
                    dataLog("    Size class: ", sizeClass, "\n");

                // Optimize the size class so that there isn't any slop at the end of the block's
                // payload.
                unsigned cellsPerBlock = MarkedSpace::blockPayload / sizeClass;
                size_t possiblyBetterSizeClass = (MarkedSpace::blockPayload / cellsPerBlock) & ~(MarkedSpace::sizeStep - 1);

                if (Options::dumpSizeClasses())
                    dataLog("    Possibly better size class: ", possiblyBetterSizeClass, "\n");

                // The size class we just came up with is better than the other one if it reduces
                // total wastage assuming we only allocate cells of that size.
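                // Worked example with made-up numbers (not the real constants): if the block payload
                // were 16000 bytes and sizeClass were 1200, then cellsPerBlock would be 13 and the
                // block would end with 16000 - 13 * 1200 = 400 unusable bytes. Rounding the class up
                // to possiblyBetterSizeClass = 1216 still fits 13 cells per block but over-allocates
                // only (1216 - 1200) * 13 = 208 bytes, so the larger class would win below.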
                size_t originalWastage = MarkedSpace::blockPayload - cellsPerBlock * sizeClass;
                size_t newWastage = (possiblyBetterSizeClass - sizeClass) * cellsPerBlock;

                if (Options::dumpSizeClasses())
                    dataLog("    Original wastage: ", originalWastage, ", new wastage: ", newWastage, "\n");

                size_t betterSizeClass;
                if (newWastage > originalWastage)
                    betterSizeClass = sizeClass;
                else
                    betterSizeClass = possiblyBetterSizeClass;

                if (Options::dumpSizeClasses())
                    dataLog("    Choosing size class: ", betterSizeClass, "\n");

                if (betterSizeClass == result->last()) {
                    // Defense for when the size class progression is small.
                    continue;
                }

                // This is usually how we get out of the loop.
                if (betterSizeClass > MarkedSpace::largeCutoff
                    || betterSizeClass > Options::largeAllocationCutoff())
                    break;

                add(betterSizeClass);
            }

            if (Options::dumpSizeClasses())
                dataLog("JSC Heap MarkedSpace size class dump: ", listDump(*result), "\n");

            // We have an optimization in MarkedSpace::optimalSizeFor() that assumes things about
            // the size class table. This checks our results against that function's assumptions.
            for (size_t size = MarkedSpace::sizeStep, i = 0; size <= MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep, i++)
                RELEASE_ASSERT(result->at(i) == size);
        });
    return *result;
}

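// Fills `table` with one entry per size step: for each configured size class, cons(sizeClass) is
// stored at every not-yet-filled index up to and including sizeClassToIndex(sizeClass); any indices
// past the largest size class get defaultCons(indexToSizeClass(i)). Assuming sizeClassToIndex()
// behaves as declared in MarkedSpace.h, the result is that table[sizeClassToIndex(n)] holds the
// entry for the smallest size class that can accommodate an n-byte cell.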
template<typename TableType, typename SizeClassCons, typename DefaultCons>
void buildSizeClassTable(TableType& table, const SizeClassCons& cons, const DefaultCons& defaultCons)
{
    size_t nextIndex = 0;
    for (size_t sizeClass : sizeClasses()) {
        auto entry = cons(sizeClass);
        size_t index = MarkedSpace::sizeClassToIndex(sizeClass);
        for (size_t i = nextIndex; i <= index; ++i)
            table[i] = entry;
        nextIndex = index + 1;
    }
    for (size_t i = nextIndex; i < MarkedSpace::numSizeClasses; ++i)
        table[i] = defaultCons(MarkedSpace::indexToSizeClass(i));
}

} // anonymous namespace

void MarkedSpace::initializeSizeClassForStepSize()
{
    // We call this multiple times and we may call it simultaneously from multiple threads. That's
    // OK, since it always stores the same values into the table.

    buildSizeClassTable(
        s_sizeClassForSizeStep,
        [&] (size_t sizeClass) -> size_t {
            return sizeClass;
        },
        [&] (size_t sizeClass) -> size_t {
            return sizeClass;
        });
}

MarkedSpace::MarkedSpace(Heap* heap)
    : m_heap(heap)
    , m_capacity(0)
    , m_isIterating(false)
{
    initializeSizeClassForStepSize();

    forEachSubspace(
        [&] (Subspace& subspace, AllocatorAttributes attributes) -> IterationStatus {
            subspace.attributes = attributes;

            buildSizeClassTable(
                subspace.allocatorForSizeStep,
                [&] (size_t sizeClass) -> MarkedAllocator* {
                    return subspace.bagOfAllocators.add(heap, this, sizeClass, attributes);
                },
                [&] (size_t) -> MarkedAllocator* {
                    return nullptr;
                });

            return IterationStatus::Continue;
        });

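    // Chain every allocator from every subspace into one list. Each allocator is prepended to the
    // list built so far, so m_firstAllocator ends up pointing at the allocator visited last.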
    MarkedAllocator* previous = nullptr;
    forEachSubspace(
        [&] (Subspace& subspace, AllocatorAttributes) -> IterationStatus {
            for (MarkedAllocator* allocator : subspace.bagOfAllocators) {
                allocator->setNextAllocator(previous);
                previous = allocator;
            }

            return IterationStatus::Continue;
        });
    m_firstAllocator = previous;
    m_allocatorForEmptyAllocation = previous;
}

MarkedSpace::~MarkedSpace()
{
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            freeBlock(block);
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->destroy();
    ASSERT(!m_blocks.set().size());
}

void MarkedSpace::lastChanceToFinalize()
{
    stopAllocating();
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.lastChanceToFinalize();
            return IterationStatus::Continue;
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->lastChanceToFinalize();
}

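// Note on the dispatch below: allocatorFor() (declared in MarkedSpace.h) is expected to return null
// when no MarkedAllocator covers the requested size, i.e. when the request exceeds the configured
// cutoffs; those requests fall through to a standalone LargeAllocation instead.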
void* MarkedSpace::allocate(Subspace& subspace, size_t bytes)
{
    if (MarkedAllocator* allocator = allocatorFor(subspace, bytes)) {
        void* result = allocator->allocate();
        return result;
    }
    return allocateLarge(subspace, bytes);
}

void* MarkedSpace::tryAllocate(Subspace& subspace, size_t bytes)
{
    if (MarkedAllocator* allocator = allocatorFor(subspace, bytes)) {
        void* result = allocator->tryAllocate();
        return result;
    }
    return tryAllocateLarge(subspace, bytes);
}

void* MarkedSpace::allocateLarge(Subspace& subspace, size_t size)
{
    void* result = tryAllocateLarge(subspace, size);
    RELEASE_ASSERT(result);
    return result;
}

void* MarkedSpace::tryAllocateLarge(Subspace& subspace, size_t size)
{
    m_heap->collectIfNecessaryOrDefer();

    size = WTF::roundUpToMultipleOf<sizeStep>(size);
    LargeAllocation* allocation = LargeAllocation::tryCreate(*m_heap, size, subspace.attributes);
    if (!allocation)
        return nullptr;

    m_largeAllocations.append(allocation);
    m_heap->didAllocate(size);
    m_capacity += size;
    return allocation->cell();
}

void MarkedSpace::sweep()
{
    m_heap->sweeper()->willFinishSweeping();
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.sweep();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::sweepLargeAllocations()
{
    RELEASE_ASSERT(m_largeAllocationsNurseryOffset == m_largeAllocations.size());
    unsigned srcIndex = m_largeAllocationsNurseryOffsetForSweep;
    unsigned dstIndex = srcIndex;
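    // Sweep the large allocations in place: walk forward from the nursery offset, destroy any
    // allocation that turned out to be empty, and slide the survivors down so the vector stays
    // dense before it is resized below.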
    while (srcIndex < m_largeAllocations.size()) {
        LargeAllocation* allocation = m_largeAllocations[srcIndex++];
        allocation->sweep();
        if (allocation->isEmpty()) {
            m_capacity -= allocation->cellSize();
            allocation->destroy();
            continue;
        }
        m_largeAllocations[dstIndex++] = allocation;
    }
    m_largeAllocations.resize(dstIndex);
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();
}

void MarkedSpace::prepareForAllocation()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.prepareForAllocation();
            return IterationStatus::Continue;
        });

    m_activeWeakSets.takeFrom(m_newActiveWeakSets);

    if (m_heap->operationInProgress() == EdenCollection)
        m_largeAllocationsNurseryOffsetForSweep = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsNurseryOffsetForSweep = 0;
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();

    m_allocatorForEmptyAllocation = m_firstAllocator;
}

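// Weak sets that saw new allocation since the last collection sit in m_newActiveWeakSets and are
// always visited and reaped. The older active weak sets only need attention during a full
// collection, since an eden collection does not re-examine old objects.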
void MarkedSpace::visitWeakSets(HeapRootVisitor& heapRootVisitor)
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->visit(heapRootVisitor);
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->operationInProgress() == FullCollection)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::reapWeakSets()
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->reap();
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->operationInProgress() == FullCollection)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::stopAllocating()
{
    ASSERT(!isIterating());
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.stopAllocating();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::prepareForMarking()
{
    if (m_heap->operationInProgress() == EdenCollection)
        m_largeAllocationsOffsetForThisCollection = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsOffsetForThisCollection = 0;
    m_largeAllocationsForThisCollectionBegin = m_largeAllocations.begin() + m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionSize = m_largeAllocations.size() - m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionEnd = m_largeAllocations.end();
    RELEASE_ASSERT(m_largeAllocationsForThisCollectionEnd == m_largeAllocationsForThisCollectionBegin + m_largeAllocationsForThisCollectionSize);
    std::sort(
        m_largeAllocationsForThisCollectionBegin, m_largeAllocationsForThisCollectionEnd,
        [&] (LargeAllocation* a, LargeAllocation* b) {
            return a < b;
        });
}

void MarkedSpace::resumeAllocating()
{
    ASSERT(isIterating());
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.resumeAllocating();
            return IterationStatus::Continue;
        });
    // Nothing to do for LargeAllocations.
}

bool MarkedSpace::isPagedOut(double deadline)
{
    bool result = false;
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            if (allocator.isPagedOut(deadline)) {
                result = true;
                return IterationStatus::Done;
            }
            return IterationStatus::Continue;
        });
    // FIXME: Consider taking LargeAllocations into account here.
    return result;
}

void MarkedSpace::freeBlock(MarkedBlock::Handle* block)
{
    block->allocator()->removeBlock(block);
    m_capacity -= MarkedBlock::blockSize;
    m_blocks.remove(&block->block());
    delete block;
}

void MarkedSpace::freeOrShrinkBlock(MarkedBlock::Handle* block)
{
    if (!block->isEmpty()) {
        block->shrink();
        return;
    }

    freeBlock(block);
}

void MarkedSpace::shrink()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.shrink();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::clearNewlyAllocated()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            if (MarkedBlock::Handle* block = allocator.takeLastActiveBlock())
                block->clearNewlyAllocated();
            return IterationStatus::Continue;
        });

    for (unsigned i = m_largeAllocationsOffsetForThisCollection; i < m_largeAllocations.size(); ++i)
        m_largeAllocations[i]->clearNewlyAllocated();

    if (!ASSERT_DISABLED) {
        forEachBlock(
            [&] (MarkedBlock::Handle* block) {
                ASSERT_UNUSED(block, !block->clearNewlyAllocated());
            });

        for (LargeAllocation* allocation : m_largeAllocations)
            ASSERT_UNUSED(allocation, !allocation->isNewlyAllocated());
    }
}

void MarkedSpace::beginMarking()
{
    if (m_heap->operationInProgress() == FullCollection) {
        forEachAllocator(
            [&] (MarkedAllocator& allocator) -> IterationStatus {
                allocator.beginMarkingForFullCollection();
                return IterationStatus::Continue;
            });

        m_version = nextVersion(m_version);

        if (UNLIKELY(m_version == initialVersion)) {
            // Oh no! Version wrap-around! We handle this by setting all block versions to null.
            forEachBlock(
                [&] (MarkedBlock::Handle* handle) {
                    handle->block().resetVersion();
                });
        }

        for (LargeAllocation* allocation : m_largeAllocations)
            allocation->flip();
    }

    if (!ASSERT_DISABLED) {
        forEachBlock(
            [&] (MarkedBlock::Handle* block) {
                if (block->needsFlip())
                    return;
                ASSERT(!block->isFreeListed());
            });
    }

    m_isMarking = true;
}

void MarkedSpace::endMarking()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.endMarking();
            return IterationStatus::Continue;
        });

    m_isMarking = false;
}

void MarkedSpace::willStartIterating()
{
    ASSERT(!isIterating());
    stopAllocating();
    m_isIterating = true;
}

void MarkedSpace::didFinishIterating()
{
    ASSERT(isIterating());
    resumeAllocating();
    m_isIterating = false;
}

size_t MarkedSpace::objectCount()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result++;
    }
    return result;
}

size_t MarkedSpace::size()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount() * block->cellSize();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result += allocation->cellSize();
    }
    return result;
}

size_t MarkedSpace::capacity()
{
    return m_capacity;
}

void MarkedSpace::addActiveWeakSet(WeakSet* weakSet)
{
    // We conservatively assume that the WeakSet should belong in the new set. In fact, some weak
    // sets might contain new weak handles even though they are tied to old objects. This slightly
    // increases the amount of scanning that an eden collection would have to do, but the effect
    // ought to be small.
    m_newActiveWeakSets.append(weakSet);
}

void MarkedSpace::didAddBlock(MarkedBlock::Handle* block)
{
    // WARNING: This function is called before block is fully initialized. The block will not know
    // its cellSize() or attributes(). The latter implies that you can't ask things like
    // needsDestruction().
    m_capacity += MarkedBlock::blockSize;
    m_blocks.add(&block->block());
}

void MarkedSpace::didAllocateInBlock(MarkedBlock::Handle* block)
{
    if (block->weakSet().isOnList()) {
        block->weakSet().remove();
        m_newActiveWeakSets.append(&block->weakSet());
    }
}

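// Resumes the search at m_allocatorForEmptyAllocation, which prepareForAllocation() resets to
// m_firstAllocator, so allocators already exhausted by earlier calls are not revisited until the
// next reset.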
MarkedBlock::Handle* MarkedSpace::findEmptyBlockToSteal()
{
    for (; m_allocatorForEmptyAllocation; m_allocatorForEmptyAllocation = m_allocatorForEmptyAllocation->nextAllocator()) {
        if (MarkedBlock::Handle* block = m_allocatorForEmptyAllocation->findEmptyBlockToSteal())
            return block;
    }
    return nullptr;
}

void MarkedSpace::snapshotUnswept()
{
    if (m_heap->operationInProgress() == EdenCollection) {
        forEachAllocator(
            [&] (MarkedAllocator& allocator) -> IterationStatus {
                allocator.snapshotUnsweptForEdenCollection();
                return IterationStatus::Continue;
            });
    } else {
        forEachAllocator(
            [&] (MarkedAllocator& allocator) -> IterationStatus {
                allocator.snapshotUnsweptForFullCollection();
                return IterationStatus::Continue;
            });
    }
}

void MarkedSpace::assertNoUnswept()
{
    if (ASSERT_DISABLED)
        return;
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.assertNoUnswept();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::dumpBits(PrintStream& out)
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            out.print("Bits for ", allocator, ":\n");
            allocator.dumpBits(out);
            return IterationStatus::Continue;
        });
}

} // namespace JSC