[JSC] Shrink size of VM by lazily allocating IsoSubspaces for non-common types
[WebKit-https.git] Source/JavaScriptCore/heap/MarkedSpace.cpp
/*
 *  Copyright (C) 2003-2018 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include "config.h"
#include "MarkedSpace.h"

#include "BlockDirectoryInlines.h"
#include "FunctionCodeBlock.h"
#include "IncrementalSweeper.h"
#include "JSObject.h"
#include "JSCInlines.h"
#include "MarkedBlockInlines.h"
#include <wtf/ListDump.h>

namespace JSC {

std::array<size_t, MarkedSpace::numSizeClasses> MarkedSpace::s_sizeClassForSizeStep;

namespace {

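// Computes the list of marked-space size classes once, on first use. The result is
// an intentionally leaked heap-allocated Vector, so the list lives for the rest of
// the process and can be shared across VMs.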
const Vector<size_t>& sizeClasses()
{
    static Vector<size_t>* result;
    static std::once_flag once;
    std::call_once(
        once,
        [] {
            result = new Vector<size_t>();

            if (Options::dumpSizeClasses()) {
                dataLog("Block size: ", MarkedBlock::blockSize, "\n");
                dataLog("Footer size: ", sizeof(MarkedBlock::Footer), "\n");
            }

            auto add = [&] (size_t sizeClass) {
                sizeClass = WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(sizeClass);
                if (Options::dumpSizeClasses())
                    dataLog("Adding JSC MarkedSpace size class: ", sizeClass, "\n");
                // Perform some validation as we go.
                RELEASE_ASSERT(!(sizeClass % MarkedSpace::sizeStep));
                if (result->isEmpty())
                    RELEASE_ASSERT(sizeClass == MarkedSpace::sizeStep);
                result->append(sizeClass);
            };

            // This is a definition of the size classes in our GC. It must define all of the
            // size classes from sizeStep up to largeCutoff.

            // Have very precise size classes for the small stuff. This is a loop to make it easy to reduce
            // atomSize.
            for (size_t size = MarkedSpace::sizeStep; size < MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep)
                add(size);

            // We want to make sure that the remaining size classes minimize internal fragmentation (i.e.
            // the wasted space at the tail end of a MarkedBlock) while proceeding roughly in an exponential
            // way starting at just above the precise size classes to four cells per block.

            if (Options::dumpSizeClasses())
                dataLog("    Marked block payload size: ", static_cast<size_t>(MarkedSpace::blockPayload), "\n");

            for (unsigned i = 0; ; ++i) {
                double approximateSize = MarkedSpace::preciseCutoff * pow(Options::sizeClassProgression(), i);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as a double: ", approximateSize, "\n");

                size_t approximateSizeInBytes = static_cast<size_t>(approximateSize);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as bytes: ", approximateSizeInBytes, "\n");

                // Make sure that the computer did the math correctly.
                RELEASE_ASSERT(approximateSizeInBytes >= MarkedSpace::preciseCutoff);

                if (approximateSizeInBytes > MarkedSpace::largeCutoff)
                    break;

                size_t sizeClass =
                    WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(approximateSizeInBytes);

                if (Options::dumpSizeClasses())
                    dataLog("    Size class: ", sizeClass, "\n");

                // Optimize the size class so that there isn't any slop at the end of the block's
                // payload.
                unsigned cellsPerBlock = MarkedSpace::blockPayload / sizeClass;
                size_t possiblyBetterSizeClass = (MarkedSpace::blockPayload / cellsPerBlock) & ~(MarkedSpace::sizeStep - 1);

                if (Options::dumpSizeClasses())
                    dataLog("    Possibly better size class: ", possiblyBetterSizeClass, "\n");

                // The size class we just came up with is better than the other one if it reduces
                // total wastage assuming we only allocate cells of that size.
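                // Illustrative example (hypothetical numbers, not necessarily the real JSC
                // constants): with a block payload of 16064 bytes and sizeStep == 16, a
                // candidate sizeClass of 1008 gives cellsPerBlock = 16064 / 1008 = 15, and the
                // & ~(sizeStep - 1) mask above (valid because sizeStep is a power of two)
                // rounds the per-cell share down to possiblyBetterSizeClass = (16064 / 15) & ~15
                // = 1056. Original wastage: 16064 - 15 * 1008 = 944 bytes of slop at the block
                // tail. New wastage: (1056 - 1008) * 15 = 720 bytes of per-cell padding. 720 is
                // less wastage than 944, so 1056 wins below.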
                size_t originalWastage = MarkedSpace::blockPayload - cellsPerBlock * sizeClass;
                size_t newWastage = (possiblyBetterSizeClass - sizeClass) * cellsPerBlock;

                if (Options::dumpSizeClasses())
                    dataLog("    Original wastage: ", originalWastage, ", new wastage: ", newWastage, "\n");

                size_t betterSizeClass;
                if (newWastage > originalWastage)
                    betterSizeClass = sizeClass;
                else
                    betterSizeClass = possiblyBetterSizeClass;

                if (Options::dumpSizeClasses())
                    dataLog("    Choosing size class: ", betterSizeClass, "\n");

                if (betterSizeClass == result->last()) {
                    // Defense for when the progression (Options::sizeClassProgression()) is
                    // small enough that consecutive candidates collapse to the same size class.
                    continue;
                }

                // This is usually how we get out of the loop.
                if (betterSizeClass > MarkedSpace::largeCutoff
                    || betterSizeClass > Options::largeAllocationCutoff())
                    break;

                add(betterSizeClass);
            }

            // Manually inject size classes for objects we know will be allocated in high volume.
            // FIXME: All of these things should have IsoSubspaces.
            // https://bugs.webkit.org/show_bug.cgi?id=179876
            add(sizeof(UnlinkedFunctionCodeBlock));
            add(sizeof(JSString));

            {
                // Sort and deduplicate.
                std::sort(result->begin(), result->end());
                auto it = std::unique(result->begin(), result->end());
                result->shrinkCapacity(it - result->begin());
            }

            if (Options::dumpSizeClasses())
                dataLog("JSC Heap MarkedSpace size class dump: ", listDump(*result), "\n");

            // We have an optimization in MarkedSpace::optimalSizeFor() that assumes things about
            // the size class table. This checks our results against that function's assumptions.
            for (size_t size = MarkedSpace::sizeStep, i = 0; size <= MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep, i++)
                RELEASE_ASSERT(result->at(i) == size);
        });
    return *result;
}

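// buildSizeClassTable fills a dense table with one entry per size step. Every index
// from just above the previous size class up to sizeClassToIndex(sizeClass) gets that
// size class's entry, so a table lookup rounds an arbitrary cell size up to the next
// size class; for example (assuming sizeStep == 16), requests of 17 through 32 bytes
// all land on the entry for the 32-byte size class. Indices past the last size class
// are filled from defaultCons.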
template<typename TableType, typename SizeClassCons, typename DefaultCons>
void buildSizeClassTable(TableType& table, const SizeClassCons& cons, const DefaultCons& defaultCons)
{
    size_t nextIndex = 0;
    for (size_t sizeClass : sizeClasses()) {
        auto entry = cons(sizeClass);
        size_t index = MarkedSpace::sizeClassToIndex(sizeClass);
        for (size_t i = nextIndex; i <= index; ++i)
            table[i] = entry;
        nextIndex = index + 1;
    }
    ASSERT(MarkedSpace::sizeClassToIndex(MarkedSpace::largeCutoff - 1) < MarkedSpace::numSizeClasses);
    for (size_t i = nextIndex; i < MarkedSpace::numSizeClasses; ++i)
        table[i] = defaultCons(MarkedSpace::indexToSizeClass(i));
}

} // anonymous namespace

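// Builds the shared size-class lookup table exactly once per process. Each size-step
// index maps to the smallest size class able to hold allocations of that size, which
// is what lets MarkedSpace::optimalSizeFor() round a requested size up with a single
// table lookup (see the assumptions checked at the end of sizeClasses() above).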
void MarkedSpace::initializeSizeClassForStepSize()
{
    static std::once_flag flag;
    std::call_once(
        flag,
        [] {
            buildSizeClassTable(
                s_sizeClassForSizeStep,
                [&] (size_t sizeClass) -> size_t {
                    return sizeClass;
                },
                [&] (size_t sizeClass) -> size_t {
                    return sizeClass;
                });
        });
}

MarkedSpace::MarkedSpace(Heap* heap)
    : m_heap(heap)
{
    initializeSizeClassForStepSize();
}

MarkedSpace::~MarkedSpace()
{
    ASSERT(!m_blocks.set().size());
}

void MarkedSpace::freeMemory()
{
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            freeBlock(block);
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->destroy();
}

void MarkedSpace::lastChanceToFinalize()
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.lastChanceToFinalize();
            return IterationStatus::Continue;
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->lastChanceToFinalize();
}

void MarkedSpace::sweep()
{
    m_heap->sweeper().stopSweeping();
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.sweep();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::sweepLargeAllocations()
{
    RELEASE_ASSERT(m_largeAllocationsNurseryOffset == m_largeAllocations.size());
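    // This sweep compacts m_largeAllocations in place: starting at the nursery offset
    // recorded for this sweep (0 for a full collection), surviving allocations slide
    // down over the ones that were swept empty and destroyed.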
    unsigned srcIndex = m_largeAllocationsNurseryOffsetForSweep;
    unsigned dstIndex = srcIndex;
    while (srcIndex < m_largeAllocations.size()) {
        LargeAllocation* allocation = m_largeAllocations[srcIndex++];
        allocation->sweep();
        if (allocation->isEmpty()) {
            m_capacity -= allocation->cellSize();
            allocation->destroy();
            continue;
        }
        m_largeAllocations[dstIndex++] = allocation;
    }
    m_largeAllocations.shrink(dstIndex);
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();
}

void MarkedSpace::prepareForAllocation()
{
    ASSERT(!mayBeGCThread() || m_heap->worldIsStopped());
    for (Subspace* subspace : m_subspaces)
        subspace->prepareForAllocation();

    m_activeWeakSets.takeFrom(m_newActiveWeakSets);

    if (m_heap->collectionScope() == CollectionScope::Eden)
        m_largeAllocationsNurseryOffsetForSweep = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsNurseryOffsetForSweep = 0;
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();
}

void MarkedSpace::visitWeakSets(SlotVisitor& visitor)
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->visit(visitor);
    };

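    // Weak sets that became active since the last collection must always be visited;
    // the older active weak sets only matter when the whole heap is in play, i.e.
    // during a full collection.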
    m_newActiveWeakSets.forEach(visit);

    if (m_heap->collectionScope() == CollectionScope::Full)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::reapWeakSets()
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->reap();
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->collectionScope() == CollectionScope::Full)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::stopAllocating()
{
    ASSERT(!isIterating());
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.stopAllocating();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::stopAllocatingForGood()
{
    ASSERT(!isIterating());
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.stopAllocatingForGood();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::prepareForConservativeScan()
{
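    // Sort this cycle's large allocations by address so that the conservative scan
    // can test candidate pointers against them efficiently (e.g. by binary search)
    // instead of walking the whole list.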
    m_largeAllocationsForThisCollectionBegin = m_largeAllocations.begin() + m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionSize = m_largeAllocations.size() - m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionEnd = m_largeAllocations.end();
    RELEASE_ASSERT(m_largeAllocationsForThisCollectionEnd == m_largeAllocationsForThisCollectionBegin + m_largeAllocationsForThisCollectionSize);

    std::sort(
        m_largeAllocationsForThisCollectionBegin, m_largeAllocationsForThisCollectionEnd,
        [&] (LargeAllocation* a, LargeAllocation* b) {
            return a < b;
        });
}

void MarkedSpace::prepareForMarking()
{
    if (m_heap->collectionScope() == CollectionScope::Eden)
        m_largeAllocationsOffsetForThisCollection = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsOffsetForThisCollection = 0;
}

void MarkedSpace::resumeAllocating()
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.resumeAllocating();
            return IterationStatus::Continue;
        });
    // Nothing to do for LargeAllocations.
}

bool MarkedSpace::isPagedOut(MonotonicTime deadline)
{
    bool result = false;
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            if (directory.isPagedOut(deadline)) {
                result = true;
                return IterationStatus::Done;
            }
            return IterationStatus::Continue;
        });
    // FIXME: Consider taking LargeAllocations into account here.
    return result;
}

void MarkedSpace::freeBlock(MarkedBlock::Handle* block)
{
    block->directory()->removeBlock(block);
    m_capacity -= MarkedBlock::blockSize;
    m_blocks.remove(&block->block());
    delete block;
}

void MarkedSpace::freeOrShrinkBlock(MarkedBlock::Handle* block)
{
    if (!block->isEmpty()) {
        block->shrink();
        return;
    }

    freeBlock(block);
}

void MarkedSpace::shrink()
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.shrink();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::beginMarking()
{
    if (m_heap->collectionScope() == CollectionScope::Full) {
        forEachDirectory(
            [&] (BlockDirectory& directory) -> IterationStatus {
                directory.beginMarkingForFullCollection();
                return IterationStatus::Continue;
            });

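        // Heap versions are drawn from a finite space, so they eventually wrap
        // around. If the next version would collide with initialVersion, stale mark
        // bits from an ancient cycle could be mistaken for current ones, so every
        // block's marks must be reset eagerly first.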
        if (UNLIKELY(nextVersion(m_markingVersion) == initialVersion)) {
            forEachBlock(
                [&] (MarkedBlock::Handle* handle) {
                    handle->block().resetMarks();
                });
        }

        m_markingVersion = nextVersion(m_markingVersion);

        for (LargeAllocation* allocation : m_largeAllocations)
            allocation->flip();
    }

    if (!ASSERT_DISABLED) {
        forEachBlock(
            [&] (MarkedBlock::Handle* block) {
                if (block->areMarksStale())
                    return;
                ASSERT(!block->isFreeListed());
            });
    }

    m_isMarking = true;
}

void MarkedSpace::endMarking()
{
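    // The same wraparound defense as in beginMarking(), but for the newlyAllocated
    // version: before the version space wraps back to initialVersion, stale
    // newlyAllocated bits have to be cleared explicitly.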
    if (UNLIKELY(nextVersion(m_newlyAllocatedVersion) == initialVersion)) {
        forEachBlock(
            [&] (MarkedBlock::Handle* handle) {
                handle->block().resetAllocated();
            });
    }

    m_newlyAllocatedVersion = nextVersion(m_newlyAllocatedVersion);

    for (unsigned i = m_largeAllocationsOffsetForThisCollection; i < m_largeAllocations.size(); ++i)
        m_largeAllocations[i]->clearNewlyAllocated();

    if (!ASSERT_DISABLED) {
        for (LargeAllocation* allocation : m_largeAllocations)
            ASSERT_UNUSED(allocation, !allocation->isNewlyAllocated());
    }

    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.endMarking();
            return IterationStatus::Continue;
        });

    m_isMarking = false;
}

void MarkedSpace::willStartIterating()
{
    ASSERT(!isIterating());
    stopAllocating();
    m_isIterating = true;
}

void MarkedSpace::didFinishIterating()
{
    ASSERT(isIterating());
    resumeAllocating();
    m_isIterating = false;
}

size_t MarkedSpace::objectCount()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result++;
    }
    return result;
}

size_t MarkedSpace::size()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount() * block->cellSize();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result += allocation->cellSize();
    }
    return result;
}

size_t MarkedSpace::capacity()
{
    return m_capacity;
}

void MarkedSpace::addActiveWeakSet(WeakSet* weakSet)
{
    // We conservatively assume that the WeakSet should belong in the new set. In fact, some weak
    // sets might contain new weak handles even though they are tied to old objects. This slightly
    // increases the amount of scanning that an eden collection would have to do, but the effect
    // ought to be small.
    m_newActiveWeakSets.append(weakSet);
}

void MarkedSpace::didAddBlock(MarkedBlock::Handle* block)
{
    // WARNING: This function is called before block is fully initialized. The block will not know
    // its cellSize() or attributes(). The latter implies that you can't ask things like
    // needsDestruction().
    m_capacity += MarkedBlock::blockSize;
    m_blocks.add(&block->block());
}

void MarkedSpace::didAllocateInBlock(MarkedBlock::Handle* block)
{
    if (block->weakSet().isOnList()) {
        block->weakSet().remove();
        m_newActiveWeakSets.append(&block->weakSet());
    }
}

void MarkedSpace::snapshotUnswept()
{
    if (m_heap->collectionScope() == CollectionScope::Eden) {
        forEachDirectory(
            [&] (BlockDirectory& directory) -> IterationStatus {
                directory.snapshotUnsweptForEdenCollection();
                return IterationStatus::Continue;
            });
    } else {
        forEachDirectory(
            [&] (BlockDirectory& directory) -> IterationStatus {
                directory.snapshotUnsweptForFullCollection();
                return IterationStatus::Continue;
            });
    }
}

void MarkedSpace::assertNoUnswept()
{
    if (ASSERT_DISABLED)
        return;
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.assertNoUnswept();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::dumpBits(PrintStream& out)
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            out.print("Bits for ", directory, ":\n");
            directory.dumpBits(out);
            return IterationStatus::Continue;
        });
}

void MarkedSpace::addBlockDirectory(const AbstractLocker&, BlockDirectory* directory)
{
    directory->setNextDirectory(nullptr);

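    // Publish safely: the store of the null next pointer must be visible before any
    // concurrent iteration over m_directories can observe this directory.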
    WTF::storeStoreFence();

    m_directories.append(std::mem_fn(&BlockDirectory::setNextDirectory), directory);
}

} // namespace JSC