// Source/JavaScriptCore/heap/MarkedSpace.cpp (WebKit, revision 02e750bee5d68c23a7b586576ebc9ff65ec33c0a)
/*
 *  Copyright (C) 2003-2018 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include "config.h"
#include "MarkedSpace.h"

#include "BlockDirectoryInlines.h"
#include "FunctionCodeBlock.h"
#include "IncrementalSweeper.h"
#include "JSObject.h"
#include "JSCInlines.h"
#include "MarkedBlockInlines.h"
#include <wtf/ListDump.h>

namespace JSC {

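// Maps each size-step index (see sizeClassToIndex()) to the size class that allocations of
// that rounded size land in. Populated once by initializeSizeClassForStepSize() below.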
std::array<size_t, MarkedSpace::numSizeClasses> MarkedSpace::s_sizeClassForSizeStep;

namespace {

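// Returns the sorted, deduplicated list of cell sizes that MarkedSpace uses for its block
// directories: every multiple of sizeStep up to preciseCutoff, followed by a roughly geometric
// progression (adjusted to reduce per-block slop) up to largeCutoff. Computed once and cached.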
const Vector<size_t>& sizeClasses()
{
    static Vector<size_t>* result;
    static std::once_flag once;
    std::call_once(
        once,
        [] {
            result = new Vector<size_t>();
            
            if (Options::dumpSizeClasses()) {
                dataLog("Block size: ", MarkedBlock::blockSize, "\n");
                dataLog("Footer size: ", sizeof(MarkedBlock::Footer), "\n");
            }
            
            auto add = [&] (size_t sizeClass) {
                sizeClass = WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(sizeClass);
                if (Options::dumpSizeClasses())
                    dataLog("Adding JSC MarkedSpace size class: ", sizeClass, "\n");
                // Perform some validation as we go.
                RELEASE_ASSERT(!(sizeClass % MarkedSpace::sizeStep));
                if (result->isEmpty())
                    RELEASE_ASSERT(sizeClass == MarkedSpace::sizeStep);
                result->append(sizeClass);
            };
            
            // This is a definition of the size classes in our GC. It must define all of the
            // size classes from sizeStep up to largeCutoff.
    
            // Have very precise size classes for the small stuff. This is a loop to make it easy to reduce
            // atomSize.
            for (size_t size = MarkedSpace::sizeStep; size < MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep)
                add(size);
            
            // We want the remaining size classes to minimize internal fragmentation (i.e. the
            // wasted space at the tail end of a MarkedBlock) while growing roughly exponentially,
            // starting just above the precise size classes and ending at about four cells per block.
            
            if (Options::dumpSizeClasses())
                dataLog("    Marked block payload size: ", static_cast<size_t>(MarkedSpace::blockPayload), "\n");
            
            for (unsigned i = 0; ; ++i) {
                double approximateSize = MarkedSpace::preciseCutoff * pow(Options::sizeClassProgression(), i);
                
                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as a double: ", approximateSize, "\n");
        
                size_t approximateSizeInBytes = static_cast<size_t>(approximateSize);
        
                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as bytes: ", approximateSizeInBytes, "\n");
        
                // Make sure that the computer did the math correctly.
                RELEASE_ASSERT(approximateSizeInBytes >= MarkedSpace::preciseCutoff);
                
                if (approximateSizeInBytes > MarkedSpace::largeCutoff)
                    break;
                
                size_t sizeClass =
                    WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(approximateSizeInBytes);
                
                if (Options::dumpSizeClasses())
                    dataLog("    Size class: ", sizeClass, "\n");
                
                // Optimize the size class so that there isn't any slop at the end of the block's
                // payload.
                unsigned cellsPerBlock = MarkedSpace::blockPayload / sizeClass;
                size_t possiblyBetterSizeClass = (MarkedSpace::blockPayload / cellsPerBlock) & ~(MarkedSpace::sizeStep - 1);
                
                if (Options::dumpSizeClasses())
                    dataLog("    Possibly better size class: ", possiblyBetterSizeClass, "\n");

                // The size class we just came up with is better than the other one if it reduces
                // total wastage assuming we only allocate cells of that size.
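                // Worked example with hypothetical numbers (the real constants come from
                // MarkedBlock.h / MarkedSpace.h): if blockPayload were 16336 and sizeClass 1600
                // with a sizeStep of 16, then cellsPerBlock = 16336 / 1600 = 10 and
                // possiblyBetterSizeClass = (16336 / 10) & ~15 = 1632. The original wastage would
                // be 16336 - 10 * 1600 = 336 bytes of slop at the end of the block; the new
                // wastage would be (1632 - 1600) * 10 = 320 bytes spread across the cells, so
                // 1632 would win.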
                size_t originalWastage = MarkedSpace::blockPayload - cellsPerBlock * sizeClass;
                size_t newWastage = (possiblyBetterSizeClass - sizeClass) * cellsPerBlock;
                
                if (Options::dumpSizeClasses())
                    dataLog("    Original wastage: ", originalWastage, ", new wastage: ", newWastage, "\n");
                
                size_t betterSizeClass;
                if (newWastage > originalWastage)
                    betterSizeClass = sizeClass;
                else
                    betterSizeClass = possiblyBetterSizeClass;
                
                if (Options::dumpSizeClasses())
                    dataLog("    Choosing size class: ", betterSizeClass, "\n");
                
                if (betterSizeClass == result->last()) {
                    // Defense for when the exponential step (Options::sizeClassProgression()) is small.
                    continue;
                }
                
                // This is usually how we get out of the loop.
                if (betterSizeClass > MarkedSpace::largeCutoff
                    || betterSizeClass > Options::largeAllocationCutoff())
                    break;
                
                add(betterSizeClass);
            }

            // Manually inject size classes for objects we know will be allocated in high volume.
            // FIXME: All of these things should have IsoSubspaces.
            // https://bugs.webkit.org/show_bug.cgi?id=179876
            add(256);

            {
                // Sort and deduplicate.
                std::sort(result->begin(), result->end());
                auto it = std::unique(result->begin(), result->end());
                result->shrinkCapacity(it - result->begin());
            }

            if (Options::dumpSizeClasses())
                dataLog("JSC Heap MarkedSpace size class dump: ", listDump(*result), "\n");

            // We have an optimization in MarkedSpace::optimalSizeFor() that assumes the table
            // contains every multiple of sizeStep up to preciseCutoff as its own size class. This
            // checks our results against that assumption.
            for (size_t size = MarkedSpace::sizeStep, i = 0; size <= MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep, i++)
                RELEASE_ASSERT(result->at(i) == size);
        });
    return *result;
}

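// Expands the sparse size class list into a dense table indexed by size-step index: every index
// up to and including a size class's own index gets that size class's entry, so a lookup returns
// the smallest size class that can hold the requested size. Indices past the last size class
// (up to numSizeClasses) are filled in by defaultCons.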
template<typename TableType, typename SizeClassCons, typename DefaultCons>
void buildSizeClassTable(TableType& table, const SizeClassCons& cons, const DefaultCons& defaultCons)
{
    size_t nextIndex = 0;
    for (size_t sizeClass : sizeClasses()) {
        auto entry = cons(sizeClass);
        size_t index = MarkedSpace::sizeClassToIndex(sizeClass);
        for (size_t i = nextIndex; i <= index; ++i)
            table[i] = entry;
        nextIndex = index + 1;
    }
    ASSERT(MarkedSpace::sizeClassToIndex(MarkedSpace::largeCutoff - 1) < MarkedSpace::numSizeClasses);
    for (size_t i = nextIndex; i < MarkedSpace::numSizeClasses; ++i)
        table[i] = defaultCons(MarkedSpace::indexToSizeClass(i));
}

} // anonymous namespace

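// Builds s_sizeClassForSizeStep once. Both the size-class and default constructor lambdas just
// return the size class itself, so the table maps each size step directly to the smallest size
// class that fits it.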
void MarkedSpace::initializeSizeClassForStepSize()
{
    static std::once_flag flag;
    std::call_once(
        flag,
        [] {
            buildSizeClassTable(
                s_sizeClassForSizeStep,
                [&] (size_t sizeClass) -> size_t {
                    return sizeClass;
                },
                [&] (size_t sizeClass) -> size_t {
                    return sizeClass;
                });
        });
}

MarkedSpace::MarkedSpace(Heap* heap)
    : m_heap(heap)
{
    initializeSizeClassForStepSize();
}

MarkedSpace::~MarkedSpace()
{
    ASSERT(!m_blocks.set().size());
}

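// Tears down all heap storage: every MarkedBlock is freed, every LargeAllocation is destroyed,
// and each IsoSubspace drops its lower-tier free list.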
void MarkedSpace::freeMemory()
{
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            freeBlock(block);
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->destroy();
    forEachSubspace([&](Subspace& subspace) {
        if (subspace.isIsoSubspace())
            static_cast<IsoSubspace&>(subspace).destroyLowerTierFreeList();
        return IterationStatus::Continue;
    });
}

void MarkedSpace::lastChanceToFinalize()
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.lastChanceToFinalize();
            return IterationStatus::Continue;
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->lastChanceToFinalize();
    // We do not call lastChanceToFinalize for lower-tier swept cells since there is nothing to do for them.
}

void MarkedSpace::sweep()
{
    m_heap->sweeper().stopSweeping();
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.sweep();
            return IterationStatus::Continue;
        });
}

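// Sweeps the large allocations created since the last sweep point (all of them for a full
// collection), compacting m_largeAllocations in place. Empty allocations are either handed back
// to their IsoSubspace (sweepLowerTierCell()) or destroyed outright.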
void MarkedSpace::sweepLargeAllocations()
{
    RELEASE_ASSERT(m_largeAllocationsNurseryOffset == m_largeAllocations.size());
    unsigned srcIndex = m_largeAllocationsNurseryOffsetForSweep;
    unsigned dstIndex = srcIndex;
    while (srcIndex < m_largeAllocations.size()) {
        LargeAllocation* allocation = m_largeAllocations[srcIndex++];
        allocation->sweep();
        if (allocation->isEmpty()) {
            if (auto* set = largeAllocationSet())
                set->remove(allocation->cell());
            if (allocation->isLowerTier())
                static_cast<IsoSubspace*>(allocation->subspace())->sweepLowerTierCell(allocation);
            else {
                m_capacity -= allocation->cellSize();
                allocation->destroy();
            }
            continue;
        }
        allocation->setIndexInSpace(dstIndex);
        m_largeAllocations[dstIndex++] = allocation;
    }
    m_largeAllocations.shrink(dstIndex);
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();
}

void MarkedSpace::prepareForAllocation()
{
    ASSERT(!Thread::mayBeGCThread() || m_heap->worldIsStopped());
    for (Subspace* subspace : m_subspaces)
        subspace->prepareForAllocation();

    m_activeWeakSets.takeFrom(m_newActiveWeakSets);
    
    if (m_heap->collectionScope() == CollectionScope::Eden)
        m_largeAllocationsNurseryOffsetForSweep = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsNurseryOffsetForSweep = 0;
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();
}

void MarkedSpace::enableLargeAllocationTracking()
{
    m_largeAllocationSet = makeUnique<HashSet<HeapCell*>>();
    for (auto* allocation : m_largeAllocations)
        m_largeAllocationSet->add(allocation->cell());
}

void MarkedSpace::visitWeakSets(SlotVisitor& visitor)
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->visit(visitor);
    };
    
    m_newActiveWeakSets.forEach(visit);
    
    if (m_heap->collectionScope() == CollectionScope::Full)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::reapWeakSets()
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->reap();
    };
    
    m_newActiveWeakSets.forEach(visit);
    
    if (m_heap->collectionScope() == CollectionScope::Full)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::stopAllocating()
{
    ASSERT(!isIterating());
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.stopAllocating();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::stopAllocatingForGood()
{
    ASSERT(!isIterating());
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.stopAllocatingForGood();
            return IterationStatus::Continue;
        });
}

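// Records the range of large allocations this collection cares about and sorts it by address,
// fixing up each allocation's indexInSpace to match its new position; the address-sorted range
// is what the conservative scan consults.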
void MarkedSpace::prepareForConservativeScan()
{
    m_largeAllocationsForThisCollectionBegin = m_largeAllocations.begin() + m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionSize = m_largeAllocations.size() - m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionEnd = m_largeAllocations.end();
    RELEASE_ASSERT(m_largeAllocationsForThisCollectionEnd == m_largeAllocationsForThisCollectionBegin + m_largeAllocationsForThisCollectionSize);
    
    std::sort(
        m_largeAllocationsForThisCollectionBegin, m_largeAllocationsForThisCollectionEnd,
        [&] (LargeAllocation* a, LargeAllocation* b) {
            return a < b;
        });
    unsigned index = m_largeAllocationsOffsetForThisCollection;
    for (auto* start = m_largeAllocationsForThisCollectionBegin; start != m_largeAllocationsForThisCollectionEnd; ++start, ++index) {
        (*start)->setIndexInSpace(index);
        ASSERT(m_largeAllocations[index] == *start);
        ASSERT(m_largeAllocations[index]->indexInSpace() == index);
    }
}

void MarkedSpace::prepareForMarking()
{
    if (m_heap->collectionScope() == CollectionScope::Eden)
        m_largeAllocationsOffsetForThisCollection = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsOffsetForThisCollection = 0;
}

void MarkedSpace::resumeAllocating()
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.resumeAllocating();
            return IterationStatus::Continue;
        });
    // Nothing to do for LargeAllocations.
}

bool MarkedSpace::isPagedOut(MonotonicTime deadline)
{
    bool result = false;
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            if (directory.isPagedOut(deadline)) {
                result = true;
                return IterationStatus::Done;
            }
            return IterationStatus::Continue;
        });
    // FIXME: Consider taking LargeAllocations into account here.
    return result;
}

void MarkedSpace::freeBlock(MarkedBlock::Handle* block)
{
    block->directory()->removeBlock(block);
    m_capacity -= MarkedBlock::blockSize;
    m_blocks.remove(&block->block());
    delete block;
}

void MarkedSpace::freeOrShrinkBlock(MarkedBlock::Handle* block)
{
    if (!block->isEmpty()) {
        block->shrink();
        return;
    }

    freeBlock(block);
}

void MarkedSpace::shrink()
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.shrink();
            return IterationStatus::Continue;
        });
}

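// For a full collection, tells each directory to begin marking, bumps the marking version
// (which lazily invalidates per-block mark bits), and flips every LargeAllocation. When the
// version counter is about to wrap back to initialVersion, mark bits are reset eagerly instead.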
void MarkedSpace::beginMarking()
{
    if (m_heap->collectionScope() == CollectionScope::Full) {
        forEachDirectory(
            [&] (BlockDirectory& directory) -> IterationStatus {
                directory.beginMarkingForFullCollection();
                return IterationStatus::Continue;
            });

        if (UNLIKELY(nextVersion(m_markingVersion) == initialVersion)) {
            forEachBlock(
                [&] (MarkedBlock::Handle* handle) {
                    handle->block().resetMarks();
                });
        }
        
        m_markingVersion = nextVersion(m_markingVersion);
        
        for (LargeAllocation* allocation : m_largeAllocations)
            allocation->flip();
    }

    if (!ASSERT_DISABLED) {
        forEachBlock(
            [&] (MarkedBlock::Handle* block) {
                if (block->areMarksStale())
                    return;
                ASSERT(!block->isFreeListed());
            });
    }
    
    m_isMarking = true;
}

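// Bumps the newly-allocated version (resetting blocks' newlyAllocated bits eagerly only when the
// counter would wrap), clears the newlyAllocated flag on this cycle's large allocations, and
// lets each directory finish its marking bookkeeping.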
void MarkedSpace::endMarking()
{
    if (UNLIKELY(nextVersion(m_newlyAllocatedVersion) == initialVersion)) {
        forEachBlock(
            [&] (MarkedBlock::Handle* handle) {
                handle->block().resetAllocated();
            });
    }
    
    m_newlyAllocatedVersion = nextVersion(m_newlyAllocatedVersion);
    
    for (unsigned i = m_largeAllocationsOffsetForThisCollection; i < m_largeAllocations.size(); ++i)
        m_largeAllocations[i]->clearNewlyAllocated();

    if (!ASSERT_DISABLED) {
        for (LargeAllocation* allocation : m_largeAllocations)
            ASSERT_UNUSED(allocation, !allocation->isNewlyAllocated());
    }

    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.endMarking();
            return IterationStatus::Continue;
        });
    
    m_isMarking = false;
}

void MarkedSpace::willStartIterating()
{
    ASSERT(!isIterating());
    stopAllocating();
    m_isIterating = true;
}

void MarkedSpace::didFinishIterating()
{
    ASSERT(isIterating());
    resumeAllocating();
    m_isIterating = false;
}

size_t MarkedSpace::objectCount()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result++;
    }
    return result;
}

size_t MarkedSpace::size()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount() * block->cellSize();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result += allocation->cellSize();
    }
    return result;
}

size_t MarkedSpace::capacity()
{
    return m_capacity;
}

void MarkedSpace::addActiveWeakSet(WeakSet* weakSet)
{
    // We conservatively assume that the WeakSet should belong in the new set. In fact, some weak
    // sets might contain new weak handles even though they are tied to old objects. This slightly
    // increases the amount of scanning that an eden collection would have to do, but the effect
    // ought to be small.
    m_newActiveWeakSets.append(weakSet);
}

void MarkedSpace::didAddBlock(MarkedBlock::Handle* block)
{
    // WARNING: This function is called before block is fully initialized. The block will not know
    // its cellSize() or attributes(). The latter implies that you can't ask things like
    // needsDestruction().
    m_capacity += MarkedBlock::blockSize;
    m_blocks.add(&block->block());
}

void MarkedSpace::didAllocateInBlock(MarkedBlock::Handle* block)
{
    if (block->weakSet().isOnList()) {
        block->weakSet().remove();
        m_newActiveWeakSets.append(&block->weakSet());
    }
}

void MarkedSpace::snapshotUnswept()
{
    if (m_heap->collectionScope() == CollectionScope::Eden) {
        forEachDirectory(
            [&] (BlockDirectory& directory) -> IterationStatus {
                directory.snapshotUnsweptForEdenCollection();
                return IterationStatus::Continue;
            });
    } else {
        forEachDirectory(
            [&] (BlockDirectory& directory) -> IterationStatus {
                directory.snapshotUnsweptForFullCollection();
                return IterationStatus::Continue;
            });
    }
}

void MarkedSpace::assertNoUnswept()
{
    if (ASSERT_DISABLED)
        return;
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.assertNoUnswept();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::dumpBits(PrintStream& out)
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            out.print("Bits for ", directory, ":\n");
            directory.dumpBits(out);
            return IterationStatus::Continue;
        });
}

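// Appends a directory to the directory list. The storeStoreFence orders setNextDirectory(nullptr)
// before the append, presumably because readers (e.g. forEachDirectory on a concurrent thread)
// may traverse the list without holding the lock.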
void MarkedSpace::addBlockDirectory(const AbstractLocker&, BlockDirectory* directory)
{
    directory->setNextDirectory(nullptr);
    
    WTF::storeStoreFence();

    m_directories.append(std::mem_fn(&BlockDirectory::setNextDirectory), directory);
}

} // namespace JSC