/*
 * Copyright (C) 2003-2018 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include "config.h"
#include "MarkedSpace.h"

#include "BlockDirectoryInlines.h"
#include "FunctionCodeBlock.h"
#include "IncrementalSweeper.h"
#include "JSCInlines.h"
#include "MarkedBlockInlines.h"
#include <wtf/ListDump.h>

namespace JSC {

std::array<size_t, MarkedSpace::numSizeClasses> MarkedSpace::s_sizeClassForSizeStep;

namespace {
const Vector<size_t>& sizeClasses()
{
    static Vector<size_t>* result;
    static std::once_flag once;
    std::call_once(
        once,
        [] {
            result = new Vector<size_t>();

            if (Options::dumpSizeClasses()) {
                dataLog("Block size: ", MarkedBlock::blockSize, "\n");
                dataLog("Footer size: ", sizeof(MarkedBlock::Footer), "\n");
            }
            auto add = [&] (size_t sizeClass) {
                sizeClass = WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(sizeClass);
                if (Options::dumpSizeClasses())
                    dataLog("Adding JSC MarkedSpace size class: ", sizeClass, "\n");
                // Perform some validation as we go.
                RELEASE_ASSERT(!(sizeClass % MarkedSpace::sizeStep));
                if (result->isEmpty())
                    RELEASE_ASSERT(sizeClass == MarkedSpace::sizeStep);
                result->append(sizeClass);
            };
            // This is a definition of the size classes in our GC. It must define all of the
            // size classes from sizeStep up to largeCutoff.

            // Have very precise size classes for the small stuff. This is a loop to make it easy to
            // reduce atomSize.
            for (size_t size = MarkedSpace::sizeStep; size < MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep)
                add(size);
            // We want to make sure that the remaining size classes minimize internal fragmentation (i.e.
            // the wasted space at the tail end of a MarkedBlock) while proceeding roughly in an exponential
            // way starting at just above the precise size classes to four cells per block.
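            // Purely illustrative, not the real constants: with a sizeClassProgression of 1.4 and a
            // preciseCutoff of 80 bytes, the loop below would aim at roughly 80, 112, 157, 219, ...
            // bytes before rounding and the per-block optimization that follows.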
            if (Options::dumpSizeClasses())
                dataLog(" Marked block payload size: ", static_cast<size_t>(MarkedSpace::blockPayload), "\n");

            for (unsigned i = 0; ; ++i) {
                double approximateSize = MarkedSpace::preciseCutoff * pow(Options::sizeClassProgression(), i);

                if (Options::dumpSizeClasses())
                    dataLog(" Next size class as a double: ", approximateSize, "\n");

                size_t approximateSizeInBytes = static_cast<size_t>(approximateSize);

                if (Options::dumpSizeClasses())
                    dataLog(" Next size class as bytes: ", approximateSizeInBytes, "\n");

                // Make sure that the computer did the math correctly.
                RELEASE_ASSERT(approximateSizeInBytes >= MarkedSpace::preciseCutoff);

                if (approximateSizeInBytes > MarkedSpace::largeCutoff)
                    break;

                size_t sizeClass =
                    WTF::roundUpToMultipleOf<MarkedSpace::sizeStep>(approximateSizeInBytes);
                if (Options::dumpSizeClasses())
                    dataLog(" Size class: ", sizeClass, "\n");

                // Optimize the size class so that there isn't any slop at the end of the block's
                // payload.
                unsigned cellsPerBlock = MarkedSpace::blockPayload / sizeClass;
                size_t possiblyBetterSizeClass = (MarkedSpace::blockPayload / cellsPerBlock) & ~(MarkedSpace::sizeStep - 1);

                if (Options::dumpSizeClasses())
                    dataLog(" Possibly better size class: ", possiblyBetterSizeClass, "\n");
                // The size class we just came up with is better than the other one if it reduces
                // total wastage assuming we only allocate cells of that size.
                size_t originalWastage = MarkedSpace::blockPayload - cellsPerBlock * sizeClass;
                size_t newWastage = (possiblyBetterSizeClass - sizeClass) * cellsPerBlock;

                if (Options::dumpSizeClasses())
                    dataLog(" Original wastage: ", originalWastage, ", new wastage: ", newWastage, "\n");
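                // Worked example with made-up numbers (not the real constants): if blockPayload were
                // 16000, sizeStep 16, and sizeClass 1104, then cellsPerBlock = 14 and
                // possiblyBetterSizeClass = (16000 / 14) & ~15 = 1136. originalWastage would be
                // 16000 - 14 * 1104 = 544 while newWastage = (1136 - 1104) * 14 = 448, so the larger
                // class wastes less and is chosen below.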
                size_t betterSizeClass;
                if (newWastage > originalWastage)
                    betterSizeClass = sizeClass;
                else
                    betterSizeClass = possiblyBetterSizeClass;

                if (Options::dumpSizeClasses())
                    dataLog(" Choosing size class: ", betterSizeClass, "\n");

                if (betterSizeClass == result->last()) {
                    // Defense for when expStep is small.
                    continue;
                }

                // This is usually how we get out of the loop.
                if (betterSizeClass > MarkedSpace::largeCutoff
                    || betterSizeClass > Options::preciseAllocationCutoff())
                    break;

                add(betterSizeClass);
            }
            // Manually inject size classes for objects we know will be allocated in high volume.
            // FIXME: All of these things should have IsoSubspaces.
            // https://bugs.webkit.org/show_bug.cgi?id=179876
            // Sort and deduplicate.
            std::sort(result->begin(), result->end());
            auto it = std::unique(result->begin(), result->end());
            result->shrinkCapacity(it - result->begin());
            if (Options::dumpSizeClasses())
                dataLog("JSC Heap MarkedSpace size class dump: ", listDump(*result), "\n");

            // We have an optimization in MarkedSpace::optimalSizeFor() that assumes things about
            // the size class table. This checks our results against that function's assumptions.
            for (size_t size = MarkedSpace::sizeStep, i = 0; size <= MarkedSpace::preciseCutoff; size += MarkedSpace::sizeStep, i++)
                RELEASE_ASSERT(result->at(i) == size);
        });
    return *result;
}
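// Fills every slot of the table: each size class is the entry for all indices from the end of the
// previous class up to its own sizeClassToIndex() slot, and any indices beyond the last size class
// are filled by defaultCons.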
template<typename TableType, typename SizeClassCons, typename DefaultCons>
void buildSizeClassTable(TableType& table, const SizeClassCons& cons, const DefaultCons& defaultCons)
{
    size_t nextIndex = 0;
    for (size_t sizeClass : sizeClasses()) {
        auto entry = cons(sizeClass);
        size_t index = MarkedSpace::sizeClassToIndex(sizeClass);
        for (size_t i = nextIndex; i <= index; ++i)
            table[i] = entry;
        nextIndex = index + 1;
    }
    ASSERT(MarkedSpace::sizeClassToIndex(MarkedSpace::largeCutoff - 1) < MarkedSpace::numSizeClasses);
    for (size_t i = nextIndex; i < MarkedSpace::numSizeClasses; ++i)
        table[i] = defaultCons(MarkedSpace::indexToSizeClass(i));
}
} // anonymous namespace
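// s_sizeClassForSizeStep maps each sizeStep-granular size (via sizeClassToIndex) to the size class
// used to allocate objects of that size; MarkedSpace::optimalSizeFor() presumably consults it for
// sizes between preciseCutoff and largeCutoff.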
void MarkedSpace::initializeSizeClassForStepSize()
{
    static std::once_flag flag;
    std::call_once(
        flag,
        [] {
            buildSizeClassTable(
                s_sizeClassForSizeStep,
                [&] (size_t sizeClass) -> size_t {
                    return sizeClass;
                },
                [&] (size_t sizeClass) -> size_t {
                    return sizeClass;
                });
        });
}
MarkedSpace::MarkedSpace(Heap* heap)
    : m_heap(heap)
{
    initializeSizeClassForStepSize();
}
MarkedSpace::~MarkedSpace()
{
    ASSERT(!m_blocks.set().size());
}
void MarkedSpace::freeMemory()
{
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            freeBlock(block);
        });
    for (PreciseAllocation* allocation : m_preciseAllocations)
        allocation->destroy();
    forEachSubspace([&](Subspace& subspace) {
        if (subspace.isIsoSubspace())
            static_cast<IsoSubspace&>(subspace).destroyLowerTierFreeList();
        return IterationStatus::Continue;
    });
}
void MarkedSpace::lastChanceToFinalize()
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.lastChanceToFinalize();
            return IterationStatus::Continue;
        });
    for (PreciseAllocation* allocation : m_preciseAllocations)
        allocation->lastChanceToFinalize();
    // We do not call lastChanceToFinalize for lower-tier swept cells since there is nothing to do.
}
void MarkedSpace::sweep()
{
    m_heap->sweeper().stopSweeping();
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.sweep();
            return IterationStatus::Continue;
        });
}
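// Sweeps the precise allocations added since the last sweep point and compacts the vector in place:
// surviving allocations slide down to dstIndex and keep a dense indexInSpace, while empty ones are
// destroyed (or, for lower-tier cells, handed back to their IsoSubspace).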
void MarkedSpace::sweepPreciseAllocations()
{
    RELEASE_ASSERT(m_preciseAllocationsNurseryOffset == m_preciseAllocations.size());
    unsigned srcIndex = m_preciseAllocationsNurseryOffsetForSweep;
    unsigned dstIndex = srcIndex;
    while (srcIndex < m_preciseAllocations.size()) {
        PreciseAllocation* allocation = m_preciseAllocations[srcIndex++];
        allocation->sweep();
        if (allocation->isEmpty()) {
            if (auto* set = preciseAllocationSet())
                set->remove(allocation->cell());
            if (allocation->isLowerTier())
                static_cast<IsoSubspace*>(allocation->subspace())->sweepLowerTierCell(allocation);
            else {
                m_capacity -= allocation->cellSize();
                allocation->destroy();
            }
            continue;
        }
        allocation->setIndexInSpace(dstIndex);
        m_preciseAllocations[dstIndex++] = allocation;
    }
    m_preciseAllocations.shrink(dstIndex);
    m_preciseAllocationsNurseryOffset = m_preciseAllocations.size();
}
void MarkedSpace::prepareForAllocation()
{
    ASSERT(!Thread::mayBeGCThread() || m_heap->worldIsStopped());
    for (Subspace* subspace : m_subspaces)
        subspace->prepareForAllocation();

    m_activeWeakSets.takeFrom(m_newActiveWeakSets);

    if (m_heap->collectionScope() == CollectionScope::Eden)
        m_preciseAllocationsNurseryOffsetForSweep = m_preciseAllocationsNurseryOffset;
    else
        m_preciseAllocationsNurseryOffsetForSweep = 0;
    m_preciseAllocationsNurseryOffset = m_preciseAllocations.size();
}
void MarkedSpace::enablePreciseAllocationTracking()
{
    m_preciseAllocationSet = makeUnique<HashSet<HeapCell*>>();
    for (auto* allocation : m_preciseAllocations)
        m_preciseAllocationSet->add(allocation->cell());
}
void MarkedSpace::visitWeakSets(SlotVisitor& visitor)
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->visit(visitor);
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->collectionScope() == CollectionScope::Full)
        m_activeWeakSets.forEach(visit);
}
void MarkedSpace::reapWeakSets()
{
    auto visit = [&] (WeakSet* weakSet) {
        weakSet->reap();
    };

    m_newActiveWeakSets.forEach(visit);

    if (m_heap->collectionScope() == CollectionScope::Full)
        m_activeWeakSets.forEach(visit);
}
void MarkedSpace::stopAllocating()
{
    ASSERT(!isIterating());
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.stopAllocating();
            return IterationStatus::Continue;
        });
}
void MarkedSpace::stopAllocatingForGood()
{
    ASSERT(!isIterating());
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.stopAllocatingForGood();
            return IterationStatus::Continue;
        });
}
void MarkedSpace::prepareForConservativeScan()
{
    m_preciseAllocationsForThisCollectionBegin = m_preciseAllocations.begin() + m_preciseAllocationsOffsetForThisCollection;
    m_preciseAllocationsForThisCollectionSize = m_preciseAllocations.size() - m_preciseAllocationsOffsetForThisCollection;
    m_preciseAllocationsForThisCollectionEnd = m_preciseAllocations.end();
    RELEASE_ASSERT(m_preciseAllocationsForThisCollectionEnd == m_preciseAllocationsForThisCollectionBegin + m_preciseAllocationsForThisCollectionSize);

    std::sort(
        m_preciseAllocationsForThisCollectionBegin, m_preciseAllocationsForThisCollectionEnd,
        [&] (PreciseAllocation* a, PreciseAllocation* b) {
            return a < b;
        });
    unsigned index = m_preciseAllocationsOffsetForThisCollection;
    for (auto* start = m_preciseAllocationsForThisCollectionBegin; start != m_preciseAllocationsForThisCollectionEnd; ++start, ++index) {
        (*start)->setIndexInSpace(index);
        ASSERT(m_preciseAllocations[index] == *start);
        ASSERT(m_preciseAllocations[index]->indexInSpace() == index);
    }
}
void MarkedSpace::prepareForMarking()
{
    if (m_heap->collectionScope() == CollectionScope::Eden)
        m_preciseAllocationsOffsetForThisCollection = m_preciseAllocationsNurseryOffset;
    else
        m_preciseAllocationsOffsetForThisCollection = 0;
}
void MarkedSpace::resumeAllocating()
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.resumeAllocating();
            return IterationStatus::Continue;
        });
    // Nothing to do for PreciseAllocations.
}
bool MarkedSpace::isPagedOut(MonotonicTime deadline)
{
    bool result = false;
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            if (directory.isPagedOut(deadline)) {
                result = true;
                return IterationStatus::Done;
            }
            return IterationStatus::Continue;
        });
    // FIXME: Consider taking PreciseAllocations into account here.
    return result;
}
void MarkedSpace::freeBlock(MarkedBlock::Handle* block)
{
    block->directory()->removeBlock(block);
    m_capacity -= MarkedBlock::blockSize;
    m_blocks.remove(&block->block());
    delete block;
}
void MarkedSpace::freeOrShrinkBlock(MarkedBlock::Handle* block)
{
    if (!block->isEmpty()) {
        block->shrink();
        return;
    }

    freeBlock(block);
}
void MarkedSpace::shrink()
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.shrink();
            return IterationStatus::Continue;
        });
}
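// Marking is versioned rather than eagerly cleared: bumping m_markingVersion makes every block's
// marks stale, and only when the version would wrap back to initialVersion do we walk all blocks
// and reset their mark bits for real.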
void MarkedSpace::beginMarking()
{
    if (m_heap->collectionScope() == CollectionScope::Full) {
        forEachDirectory(
            [&] (BlockDirectory& directory) -> IterationStatus {
                directory.beginMarkingForFullCollection();
                return IterationStatus::Continue;
            });

        if (UNLIKELY(nextVersion(m_markingVersion) == initialVersion)) {
            forEachBlock(
                [&] (MarkedBlock::Handle* handle) {
                    handle->block().resetMarks();
                });
        }

        m_markingVersion = nextVersion(m_markingVersion);

        for (PreciseAllocation* allocation : m_preciseAllocations)
            allocation->flip();
    }

    if (!ASSERT_DISABLED) {
        forEachBlock(
            [&] (MarkedBlock::Handle* block) {
                if (block->areMarksStale())
                    return;
                ASSERT(!block->isFreeListed());
            });
    }

    m_isMarking = true;
}
void MarkedSpace::endMarking()
{
    if (UNLIKELY(nextVersion(m_newlyAllocatedVersion) == initialVersion)) {
        forEachBlock(
            [&] (MarkedBlock::Handle* handle) {
                handle->block().resetAllocated();
            });
    }

    m_newlyAllocatedVersion = nextVersion(m_newlyAllocatedVersion);

    for (unsigned i = m_preciseAllocationsOffsetForThisCollection; i < m_preciseAllocations.size(); ++i)
        m_preciseAllocations[i]->clearNewlyAllocated();

    if (!ASSERT_DISABLED) {
        for (PreciseAllocation* allocation : m_preciseAllocations)
            ASSERT_UNUSED(allocation, !allocation->isNewlyAllocated());
    }

    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.endMarking();
            return IterationStatus::Continue;
        });

    m_isMarking = false;
}
void MarkedSpace::willStartIterating()
{
    ASSERT(!isIterating());
    stopAllocating();
    m_isIterating = true;
}
void MarkedSpace::didFinishIterating()
{
    ASSERT(isIterating());
    resumeAllocating();
    m_isIterating = false;
}
size_t MarkedSpace::objectCount()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount();
        });
    for (PreciseAllocation* allocation : m_preciseAllocations) {
        if (allocation->isMarked())
            result++;
    }
    return result;
}
size_t MarkedSpace::size()
{
    size_t result = 0;
    forEachBlock(
        [&] (MarkedBlock::Handle* block) {
            result += block->markCount() * block->cellSize();
        });
    for (PreciseAllocation* allocation : m_preciseAllocations) {
        if (allocation->isMarked())
            result += allocation->cellSize();
    }
    return result;
}

size_t MarkedSpace::capacity()
{
    return m_capacity;
}
void MarkedSpace::addActiveWeakSet(WeakSet* weakSet)
{
    // We conservatively assume that the WeakSet should belong in the new set. In fact, some weak
    // sets might contain new weak handles even though they are tied to old objects. This slightly
    // increases the amount of scanning that an eden collection would have to do, but the effect
    // ought to be small.
    m_newActiveWeakSets.append(weakSet);
}
void MarkedSpace::didAddBlock(MarkedBlock::Handle* block)
{
    // WARNING: This function is called before block is fully initialized. The block will not know
    // its cellSize() or attributes(). The latter implies that you can't ask things like
    // needsDestruction().
    m_capacity += MarkedBlock::blockSize;
    m_blocks.add(&block->block());
}
void MarkedSpace::didAllocateInBlock(MarkedBlock::Handle* block)
{
    if (block->weakSet().isOnList()) {
        block->weakSet().remove();
        m_newActiveWeakSets.append(&block->weakSet());
    }
}
void MarkedSpace::snapshotUnswept()
{
    if (m_heap->collectionScope() == CollectionScope::Eden) {
        forEachDirectory(
            [&] (BlockDirectory& directory) -> IterationStatus {
                directory.snapshotUnsweptForEdenCollection();
                return IterationStatus::Continue;
            });
    } else {
        forEachDirectory(
            [&] (BlockDirectory& directory) -> IterationStatus {
                directory.snapshotUnsweptForFullCollection();
                return IterationStatus::Continue;
            });
    }
}
void MarkedSpace::assertNoUnswept()
{
    if (ASSERT_DISABLED)
        return;
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            directory.assertNoUnswept();
            return IterationStatus::Continue;
        });
}
void MarkedSpace::dumpBits(PrintStream& out)
{
    forEachDirectory(
        [&] (BlockDirectory& directory) -> IterationStatus {
            out.print("Bits for ", directory, ":\n");
            directory.dumpBits(out);
            return IterationStatus::Continue;
        });
}
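// Presumably the store-store fence below ensures the directory's initialization (including its
// cleared next pointer) is visible before the append publishes it to m_directories, which
// concurrent threads may be traversing without the lock.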
void MarkedSpace::addBlockDirectory(const AbstractLocker&, BlockDirectory* directory)
{
    directory->setNextDirectory(nullptr);

    WTF::storeStoreFence();

    m_directories.append(std::mem_fn(&BlockDirectory::setNextDirectory), directory);
}

} // namespace JSC