Source/JavaScriptCore/heap/MarkedSpace.cpp
/*
 *  Copyright (C) 2003-2017 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include "config.h"
#include "MarkedSpace.h"

#include "FunctionCodeBlock.h"
#include "IncrementalSweeper.h"
#include "JSObject.h"
#include "JSCInlines.h"
#include "MarkedAllocatorInlines.h"
#include "MarkedBlockInlines.h"
#include <wtf/ListDump.h>

namespace JSC {

std::array<size_t, MarkedSpace::numSizeClasses> MarkedSpace::s_sizeClassForSizeStep;

namespace {

const Vector<size_t>& sizeClasses()
{
    static Vector<size_t>* result;
    static std::once_flag once;
    std::call_once(
        once,
        [] {
            result = new Vector<size_t>();

            auto add = [&] (size_t sizeClass) {
                sizeClass = WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(sizeClass);
                if (Options::dumpSizeClasses())
                    dataLog("Adding JSC MarkedSpace size class: ", sizeClass, "\n");
                // Perform some validation as we go.
                RELEASE_ASSERT(!(sizeClass % MarkedSpace::sizeStep));
                if (result->isEmpty())
                    RELEASE_ASSERT(sizeClass == MarkedSpace::sizeStep);
                result->append(sizeClass);
            };

            // This is a definition of the size classes in our GC. It must define all of the
            // size classes from sizeStep up to largeCutoff.

            // Have very precise size classes for the small stuff. This is a loop to make it easy to reduce
            // atomSize.
            for (size_t size = MarkedSpace::sizeStep; size < MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep)
                add(size);

            // We want to make sure that the remaining size classes minimize internal fragmentation (i.e.
            // the wasted space at the tail end of a MarkedBlock) while proceeding roughly in an exponential
            // way starting at just above the precise size classes to four cells per block.

            if (Options::dumpSizeClasses())
                dataLog("    Marked block payload size: ", static_cast<size_t>(MarkedSpace::blockPayload), "\n");

            for (unsigned i = 0; ; ++i) {
                double approximateSize = MarkedSpace::preciseCutoff * pow(Options::sizeClassProgression(), i);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as a double: ", approximateSize, "\n");

                size_t approximateSizeInBytes = static_cast<size_t>(approximateSize);

                if (Options::dumpSizeClasses())
                    dataLog("    Next size class as bytes: ", approximateSizeInBytes, "\n");

                // Make sure that the computer did the math correctly.
                RELEASE_ASSERT(approximateSizeInBytes >= MarkedSpace::preciseCutoff);

                if (approximateSizeInBytes > MarkedSpace::largeCutoff)
                    break;

                size_t sizeClass =
                    WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(approximateSizeInBytes);

                if (Options::dumpSizeClasses())
                    dataLog("    Size class: ", sizeClass, "\n");

                // Optimize the size class so that there isn't any slop at the end of the block's
                // payload.
                unsigned cellsPerBlock = MarkedSpace::blockPayload / sizeClass;
                size_t possiblyBetterSizeClass = (MarkedSpace::blockPayload / cellsPerBlock) & ~(MarkedSpace::sizeStep - 1);

                if (Options::dumpSizeClasses())
                    dataLog("    Possibly better size class: ", possiblyBetterSizeClass, "\n");

                // The size class we just came up with is better than the other one if it reduces
                // total wastage assuming we only allocate cells of that size.
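                // Purely illustrative numbers (the real blockPayload and sizeStep may differ):
                // with a 16000-byte payload, a 16-byte sizeStep, and a 1024-byte candidate,
                // cellsPerBlock is 15 and the tail slop is 640 bytes; widening the class to
                // 1056 bytes costs only (1056 - 1024) * 15 = 480 bytes of per-cell padding,
                // so 1056 wins below.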
                size_t originalWastage = MarkedSpace::blockPayload - cellsPerBlock * sizeClass;
                size_t newWastage = (possiblyBetterSizeClass - sizeClass) * cellsPerBlock;

                if (Options::dumpSizeClasses())
                    dataLog("    Original wastage: ", originalWastage, ", new wastage: ", newWastage, "\n");

                size_t betterSizeClass;
                if (newWastage > originalWastage)
                    betterSizeClass = sizeClass;
                else
                    betterSizeClass = possiblyBetterSizeClass;

                if (Options::dumpSizeClasses())
                    dataLog("    Choosing size class: ", betterSizeClass, "\n");

                if (betterSizeClass == result->last()) {
                    // Defense for when Options::sizeClassProgression() is small and consecutive
                    // steps round to the same class.
                    continue;
                }

                // This is usually how we get out of the loop.
                if (betterSizeClass > MarkedSpace::largeCutoff
                    || betterSizeClass > Options::largeAllocationCutoff())
                    break;

                add(betterSizeClass);
            }

            // Manually inject size classes for objects we know will be allocated in high volume.
            // FIXME: All of these things should have IsoSubspaces.
            // https://bugs.webkit.org/show_bug.cgi?id=179876
            add(sizeof(UnlinkedFunctionExecutable));
            add(sizeof(UnlinkedFunctionCodeBlock));
            add(sizeof(FunctionCodeBlock));
            add(sizeof(JSString));
            add(sizeof(JSFunction));
            add(sizeof(PropertyTable));
            add(sizeof(Structure));

            {
                // Sort and deduplicate.
                std::sort(result->begin(), result->end());
                auto it = std::unique(result->begin(), result->end());
                result->shrinkCapacity(it - result->begin());
            }

            if (Options::dumpSizeClasses())
                dataLog("JSC Heap MarkedSpace size class dump: ", listDump(*result), "\n");

            // We have an optimization in MarkedSpace::optimalSizeFor() that assumes things about
            // the size class table. This checks our results against that function's assumptions.
            for (size_t size = MarkedSpace::sizeStep, i = 0; size <= MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep, i++)
                RELEASE_ASSERT(result->at(i) == size);
        });
    return *result;
}

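// Fills `table`, which is indexed by size step (MarkedSpace::sizeClassToIndex), so that every
// index maps to the entry built for the smallest size class that can hold allocations of that
// size; indices beyond the last size class get a default entry for their own size.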
template<typename TableType, typename SizeClassCons, typename DefaultCons>
void buildSizeClassTable(TableType& table, const SizeClassCons& cons, const DefaultCons& defaultCons)
{
    size_t nextIndex = 0;
    for (size_t sizeClass : sizeClasses()) {
        auto entry = cons(sizeClass);
        size_t index = MarkedSpace::sizeClassToIndex(sizeClass);
        for (size_t i = nextIndex; i <= index; ++i)
            table[i] = entry;
        nextIndex = index + 1;
    }
    for (size_t i = nextIndex; i < MarkedSpace::numSizeClasses; ++i)
        table[i] = defaultCons(MarkedSpace::indexToSizeClass(i));
}

} // anonymous namespace

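// Populates s_sizeClassForSizeStep, the table that MarkedSpace::optimalSizeFor() relies on (see
// the assertion at the end of sizeClasses() above). Both constructors just return the size class,
// so each size-step index maps directly to its size class.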
void MarkedSpace::initializeSizeClassForStepSize()
{
    static std::once_flag flag;
    std::call_once(
        flag,
        [] {
            buildSizeClassTable(
                s_sizeClassForSizeStep,
                [&] (size_t sizeClass) -> size_t {
                    return sizeClass;
                },
                [&] (size_t sizeClass) -> size_t {
                    return sizeClass;
                });
        });
}

MarkedSpace::MarkedSpace(Heap* heap)
    : m_heap(heap)
    , m_capacity(0)
    , m_isIterating(false)
{
    initializeSizeClassForStepSize();
}

MarkedSpace::~MarkedSpace()
{
    ASSERT(!m_blocks.set().size());
}

void MarkedSpace::freeMemory()
{
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            freeBlock(block);
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->destroy();
}

void MarkedSpace::lastChanceToFinalize()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.lastChanceToFinalize();
            return IterationStatus::Continue;
        });
    for (LargeAllocation* allocation : m_largeAllocations)
        allocation->lastChanceToFinalize();
}

void MarkedSpace::sweep()
{
    m_heap->sweeper().stopSweeping();
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.sweep();
            return IterationStatus::Continue;
        });
}

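// Sweeps the large allocations from m_largeAllocationsNurseryOffsetForSweep onward; dead ones are
// destroyed and deducted from m_capacity, while survivors are compacted in place toward the front
// of the vector.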
void MarkedSpace::sweepLargeAllocations()
{
    RELEASE_ASSERT(m_largeAllocationsNurseryOffset == m_largeAllocations.size());
    unsigned srcIndex = m_largeAllocationsNurseryOffsetForSweep;
    unsigned dstIndex = srcIndex;
    while (srcIndex < m_largeAllocations.size()) {
        LargeAllocation* allocation = m_largeAllocations[srcIndex++];
        allocation->sweep();
        if (allocation->isEmpty()) {
            m_capacity -= allocation->cellSize();
            allocation->destroy();
            continue;
        }
        m_largeAllocations[dstIndex++] = allocation;
    }
    m_largeAllocations.shrink(dstIndex);
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();
}

void MarkedSpace::prepareForAllocation()
{
    for (Subspace* subspace : m_subspaces)
        subspace->prepareForAllocation();

    m_activeWeakSets.takeFrom(m_newActiveWeakSets);

    if (m_heap->collectionScope() == CollectionScope::Eden)
        m_largeAllocationsNurseryOffsetForSweep = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsNurseryOffsetForSweep = 0;
    m_largeAllocationsNurseryOffset = m_largeAllocations.size();
}

void MarkedSpace::visitWeakSets(SlotVisitor& visitor)
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->visit(visitor);
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->collectionScope() == CollectionScope::Full)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::reapWeakSets()
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->reap();
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->collectionScope() == CollectionScope::Full)
        m_activeWeakSets.forEach(visit);
}

void MarkedSpace::stopAllocating()
{
    ASSERT(!isIterating());
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.stopAllocating();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::prepareForConservativeScan()
{
    m_largeAllocationsForThisCollectionBegin = m_largeAllocations.begin() + m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionSize = m_largeAllocations.size() - m_largeAllocationsOffsetForThisCollection;
    m_largeAllocationsForThisCollectionEnd = m_largeAllocations.end();
    RELEASE_ASSERT(m_largeAllocationsForThisCollectionEnd == m_largeAllocationsForThisCollectionBegin + m_largeAllocationsForThisCollectionSize);

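    // Keep this collection's large allocations sorted by address, presumably so that conservative
    // scanning can test candidate pointers against the range efficiently (e.g. via binary search).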
    std::sort(
        m_largeAllocationsForThisCollectionBegin, m_largeAllocationsForThisCollectionEnd,
        [&] (LargeAllocation* a, LargeAllocation* b) {
            return a < b;
        });
}

void MarkedSpace::prepareForMarking()
{
    if (m_heap->collectionScope() == CollectionScope::Eden)
        m_largeAllocationsOffsetForThisCollection = m_largeAllocationsNurseryOffset;
    else
        m_largeAllocationsOffsetForThisCollection = 0;
}

void MarkedSpace::resumeAllocating()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.resumeAllocating();
            return IterationStatus::Continue;
        });
    // Nothing to do for LargeAllocations.
}

bool MarkedSpace::isPagedOut(double deadline)
{
    bool result = false;
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            if (allocator.isPagedOut(deadline)) {
                result = true;
                return IterationStatus::Done;
            }
            return IterationStatus::Continue;
        });
    // FIXME: Consider taking LargeAllocations into account here.
    return result;
}

void MarkedSpace::freeBlock(MarkedBlock::Handle* block)
{
    block->allocator()->removeBlock(block);
    m_capacity -= MarkedBlock::blockSize;
    m_blocks.remove(&block->block());
    delete block;
}

void MarkedSpace::freeOrShrinkBlock(MarkedBlock::Handle* block)
{
    if (!block->isEmpty()) {
        block->shrink();
        return;
    }

    freeBlock(block);
}

void MarkedSpace::shrink()
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.shrink();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::beginMarking()
{
    if (m_heap->collectionScope() == CollectionScope::Full) {
        forEachAllocator(
            [&] (MarkedAllocator& allocator) -> IterationStatus {
                allocator.beginMarkingForFullCollection();
                return IterationStatus::Continue;
            });

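        // Mark versions eventually wrap around. When the next version would be the initial one
        // again, clear every block's marks so that bits left over from an old marking epoch
        // cannot be mistaken for current marks.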
        if (UNLIKELY(nextVersion(m_markingVersion) == initialVersion)) {
            forEachBlock(
                [&] (MarkedBlock::Handle* handle) {
                    handle->block().resetMarks();
                });
        }

        m_markingVersion = nextVersion(m_markingVersion);

        for (LargeAllocation* allocation : m_largeAllocations)
            allocation->flip();
    }

    if (!ASSERT_DISABLED) {
        forEachBlock(
            [&] (MarkedBlock::Handle* block) {
                if (block->areMarksStale())
                    return;
                ASSERT(!block->isFreeListed());
            });
    }

    m_isMarking = true;
}

void MarkedSpace::endMarking()
{
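    // Same wraparound defense as in beginMarking(), but for the newly-allocated version.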
    if (UNLIKELY(nextVersion(m_newlyAllocatedVersion) == initialVersion)) {
        forEachBlock(
            [&] (MarkedBlock::Handle* handle) {
                handle->resetAllocated();
            });
    }

    m_newlyAllocatedVersion = nextVersion(m_newlyAllocatedVersion);

    for (unsigned i = m_largeAllocationsOffsetForThisCollection; i < m_largeAllocations.size(); ++i)
        m_largeAllocations[i]->clearNewlyAllocated();

    if (!ASSERT_DISABLED) {
        for (LargeAllocation* allocation : m_largeAllocations)
            ASSERT_UNUSED(allocation, !allocation->isNewlyAllocated());
    }

    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.endMarking();
            return IterationStatus::Continue;
        });

    m_isMarking = false;
}

void MarkedSpace::willStartIterating()
{
    ASSERT(!isIterating());
    stopAllocating();
    m_isIterating = true;
}

void MarkedSpace::didFinishIterating()
{
    ASSERT(isIterating());
    resumeAllocating();
    m_isIterating = false;
}

size_t MarkedSpace::objectCount()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result++;
    }
    return result;
}

size_t MarkedSpace::size()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount() * block->cellSize();
        });
    for (LargeAllocation* allocation : m_largeAllocations) {
        if (allocation->isMarked())
            result += allocation->cellSize();
    }
    return result;
}

size_t MarkedSpace::capacity()
{
    return m_capacity;
}

void MarkedSpace::addActiveWeakSet(WeakSet* weakSet)
{
    // We conservatively assume that the WeakSet should belong in the new set. In fact, some weak
    // sets might contain new weak handles even though they are tied to old objects. This slightly
    // increases the amount of scanning that an eden collection would have to do, but the effect
    // ought to be small.
    m_newActiveWeakSets.append(weakSet);
}

void MarkedSpace::didAddBlock(MarkedBlock::Handle* block)
{
    // WARNING: This function is called before block is fully initialized. The block will not know
    // its cellSize() or attributes(). The latter implies that you can't ask things like
    // needsDestruction().
    m_capacity += MarkedBlock::blockSize;
    m_blocks.add(&block->block());
}

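// When a block starts being allocated from again, it may soon hold new weak handles, so move its
// WeakSet onto m_newActiveWeakSets, the list that even an eden collection visits (see
// visitWeakSets() above).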
void MarkedSpace::didAllocateInBlock(MarkedBlock::Handle* block)
{
    if (block->weakSet().isOnList()) {
        block->weakSet().remove();
        m_newActiveWeakSets.append(&block->weakSet());
    }
}

void MarkedSpace::snapshotUnswept()
{
    if (m_heap->collectionScope() == CollectionScope::Eden) {
        forEachAllocator(
            [&] (MarkedAllocator& allocator) -> IterationStatus {
                allocator.snapshotUnsweptForEdenCollection();
                return IterationStatus::Continue;
            });
    } else {
        forEachAllocator(
            [&] (MarkedAllocator& allocator) -> IterationStatus {
                allocator.snapshotUnsweptForFullCollection();
                return IterationStatus::Continue;
            });
    }
}

void MarkedSpace::assertNoUnswept()
{
    if (ASSERT_DISABLED)
        return;
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            allocator.assertNoUnswept();
            return IterationStatus::Continue;
        });
}

void MarkedSpace::dumpBits(PrintStream& out)
{
    forEachAllocator(
        [&] (MarkedAllocator& allocator) -> IterationStatus {
            out.print("Bits for ", allocator, ":\n");
            allocator.dumpBits(out);
            return IterationStatus::Continue;
        });
}

void MarkedSpace::addMarkedAllocator(const AbstractLocker&, MarkedAllocator* allocator)
{
    allocator->setNextAllocator(nullptr);

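    // The store-store fence presumably ensures that a thread concurrently walking the allocator
    // list sees the null next pointer set above before it sees this allocator appended.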
    WTF::storeStoreFence();

    m_allocators.append(std::mem_fn(&MarkedAllocator::setNextAllocator), allocator);
}

} // namespace JSC