Heap::isMarked() shouldn't pay the price of concurrent lazy flipping
Source/JavaScriptCore/heap/MarkedBlock.cpp
/*
 * Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "JSCell.h"
#include "JSDestructibleObject.h"
#include "JSCInlines.h"
#include "SuperSampler.h"

namespace JSC {

static const bool computeBalance = false;
static size_t balance;

MarkedBlock::Handle* MarkedBlock::tryCreate(Heap& heap, MarkedAllocator* allocator, size_t cellSize, const AllocatorAttributes& attributes)
{
    if (computeBalance) {
        balance++;
        if (!(balance % 10))
            dataLog("MarkedBlock Balance: ", balance, "\n");
    }
    void* blockSpace = tryFastAlignedMalloc(blockSize, blockSize);
    if (!blockSpace)
        return nullptr;
    if (scribbleFreeCells())
        scribble(blockSpace, blockSize);
    return new Handle(heap, allocator, cellSize, attributes, blockSpace);
}

MarkedBlock::Handle::Handle(Heap& heap, MarkedAllocator* allocator, size_t cellSize, const AllocatorAttributes& attributes, void* blockSpace)
    : m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
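    // m_endAtom (next initializer) is one past the last atom index at which a cell of this size
    // can still start without running past the block, which is why the sweep loops use i < m_endAtom.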
    , m_endAtom(atomsPerBlock - m_atomsPerCell + 1)
    , m_attributes(attributes)
    , m_state(New) // All cells start out unmarked.
    , m_allocator(allocator)
    , m_weakSet(allocator->heap()->vm(), CellContainer())
{
    m_block = new (NotNull, blockSpace) MarkedBlock(*heap.vm(), *this);

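    // The weak set was constructed above with an empty CellContainer because the MarkedBlock did
    // not exist yet; now that it does, point the weak set at it.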
    m_weakSet.setContainer(*m_block);

    heap.didAllocateBlock(blockSize);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    ASSERT(allocator);
    if (m_attributes.cellKind != HeapCell::JSCell)
        RELEASE_ASSERT(m_attributes.destruction == DoesNotNeedDestruction);
}

MarkedBlock::Handle::~Handle()
{
    Heap& heap = *this->heap();
    if (computeBalance) {
        balance--;
        if (!(balance % 10))
            dataLog("MarkedBlock Balance: ", balance, "\n");
    }
    m_block->~MarkedBlock();
    fastAlignedFree(m_block);
    heap.didFreeBlock(blockSize);
}

MarkedBlock::MarkedBlock(VM& vm, Handle& handle)
    : m_needsDestruction(handle.needsDestruction())
    , m_version(vm.heap.objectSpace().version())
    , m_handle(handle)
    , m_vm(&vm)
{
    unsigned cellsPerBlock = MarkedSpace::blockPayload / handle.cellSize();
    double markCountBias = -(Options::minMarkedBlockUtilization() * cellsPerBlock);

    // The mark count bias should be comfortably within this range.
    RELEASE_ASSERT(markCountBias > static_cast<double>(std::numeric_limits<int16_t>::min()));
    RELEASE_ASSERT(markCountBias < 0);

    m_markCountBias = static_cast<int16_t>(markCountBias);

    m_biasedMarkCount = m_markCountBias; // This means we haven't marked anything yet.
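
    // Illustrative arithmetic (hypothetical numbers, not the current Options defaults): if
    // cellsPerBlock were 100 and minMarkedBlockUtilization were 0.9, the bias would be -90.
    // noteMarked() bumps m_biasedMarkCount by one per newly marked cell, so the count crosses
    // zero once 90 cells have been marked; at that point noteMarkedSlow() fires and retires the
    // block from its allocator as being sufficiently utilized.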
}

template<MarkedBlock::BlockState blockState, MarkedBlock::Handle::SweepMode sweepMode, DestructionMode destructionMode, MarkedBlock::Handle::ScribbleMode scribbleMode, MarkedBlock::Handle::NewlyAllocatedMode newlyAllocatedMode>
FreeList MarkedBlock::Handle::specializedSweep()
{
    SuperSamplerScope superSamplerScope(false);
    ASSERT(blockState == New || blockState == Marked);
    ASSERT(!(destructionMode == DoesNotNeedDestruction && sweepMode == SweepOnly));

    assertFlipped();
    MarkedBlock& block = this->block();

    bool isNewBlock = blockState == New;
    bool isEmptyBlock = !block.hasAnyMarked()
        && newlyAllocatedMode == DoesNotHaveNewlyAllocated
        && destructionMode == DoesNotNeedDestruction;
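    // Fast path: a brand new block, or a block with no marks, no newlyAllocated bits, and no
    // destructors to run, can be swept without visiting individual cells: its whole payload is
    // handed out as a single bump-allocation range rather than a cell-by-cell free list.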
    if (Options::useBumpAllocator() && (isNewBlock || isEmptyBlock)) {
        ASSERT(block.m_marks.isEmpty());

        char* startOfLastCell = static_cast<char*>(cellAlign(block.atoms() + m_endAtom - 1));
        char* payloadEnd = startOfLastCell + cellSize();
        RELEASE_ASSERT(payloadEnd - MarkedBlock::blockSize <= bitwise_cast<char*>(&block));
        char* payloadBegin = bitwise_cast<char*>(block.atoms() + firstAtom());
        if (scribbleMode == Scribble)
            scribble(payloadBegin, payloadEnd - payloadBegin);
        m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
        FreeList result = FreeList::bump(payloadEnd, payloadEnd - payloadBegin);
        if (false)
            dataLog("Quickly swept block ", RawPointer(this), " with cell size ", cellSize(), " and attributes ", m_attributes, ": ", result, "\n");
        return result;
    }

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked
            && (block.m_marks.get(i)
                || (newlyAllocatedMode == HasNewlyAllocated && m_newlyAllocated->get(i))))
            continue;

        HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(&block.atoms()[i]);

        if (destructionMode == NeedsDestruction && blockState != New)
            static_cast<JSCell*>(cell)->callDestructor(*vm());

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            if (scribbleMode == Scribble)
                scribble(freeCell, cellSize());
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
    // otherwise we would lose information on what's currently alive.
    if (sweepMode == SweepToFreeList && newlyAllocatedMode == HasNewlyAllocated)
        m_newlyAllocated = nullptr;

    FreeList result = FreeList::list(head, count * cellSize());
    m_state = (sweepMode == SweepToFreeList ? FreeListed : Marked);
    if (false)
        dataLog("Slowly swept block ", RawPointer(&block), " with cell size ", cellSize(), " and attributes ", m_attributes, ": ", result, "\n");
    return result;
}

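// sweep() reaches specializedSweep() through a chain of small helpers, each of which pins down
// one more template parameter: destruction mode, then scribble mode, then block state and sweep
// mode. Presumably the point is that the per-cell loop above gets compiled once per combination,
// with all of these checks folded away as compile-time constants.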
FreeList MarkedBlock::Handle::sweep(SweepMode sweepMode)
{
    flipIfNecessary();

    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    m_weakSet.sweep();

    if (sweepMode == SweepOnly && m_attributes.destruction == DoesNotNeedDestruction)
        return FreeList();

    if (m_attributes.destruction == NeedsDestruction)
        return sweepHelperSelectScribbleMode<NeedsDestruction>(sweepMode);
    return sweepHelperSelectScribbleMode<DoesNotNeedDestruction>(sweepMode);
}

template<DestructionMode destructionMode>
FreeList MarkedBlock::Handle::sweepHelperSelectScribbleMode(SweepMode sweepMode)
{
    if (scribbleFreeCells())
        return sweepHelperSelectStateAndSweepMode<destructionMode, Scribble>(sweepMode);
    return sweepHelperSelectStateAndSweepMode<destructionMode, DontScribble>(sweepMode);
}

template<DestructionMode destructionMode, MarkedBlock::Handle::ScribbleMode scribbleMode>
FreeList MarkedBlock::Handle::sweepHelperSelectStateAndSweepMode(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, destructionMode, scribbleMode, DoesNotHaveNewlyAllocated>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Allocated:
        RELEASE_ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        if (m_newlyAllocated) {
            return sweepMode == SweepToFreeList
                ? specializedSweep<Marked, SweepToFreeList, destructionMode, scribbleMode, HasNewlyAllocated>()
                : specializedSweep<Marked, SweepOnly, destructionMode, scribbleMode, HasNewlyAllocated>();
        } else {
            return sweepMode == SweepToFreeList
                ? specializedSweep<Marked, SweepToFreeList, destructionMode, scribbleMode, DoesNotHaveNewlyAllocated>()
                : specializedSweep<Marked, SweepOnly, destructionMode, scribbleMode, DoesNotHaveNewlyAllocated>();
        }
    }
    RELEASE_ASSERT_NOT_REACHED();
    return FreeList();
}

void MarkedBlock::Handle::unsweepWithNoNewlyAllocated()
{
    flipIfNecessary();

    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    RELEASE_ASSERT(m_state == FreeListed);
    m_state = Marked;
}

class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
public:
    SetNewlyAllocatedFunctor(MarkedBlock::Handle* block)
        : m_block(block)
    {
    }

    IterationStatus operator()(HeapCell* cell, HeapCell::Kind) const
    {
        ASSERT(MarkedBlock::blockFor(cell) == &m_block->block());
        m_block->setNewlyAllocated(cell);
        return IterationStatus::Continue;
    }

private:
    MarkedBlock::Handle* m_block;
};

void MarkedBlock::Handle::stopAllocating(const FreeList& freeList)
{
    flipIfNecessary();
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that one of these
        // conditions holds:
        //
        // - It was not used for allocation during the previous allocation cycle.
        //   It may have dead objects, and we only know them to be dead by the
        //   fact that their mark bits are unset.
        //
        // - Someone had already done stopAllocating(), for example because of
        //   heap iteration, and they had already done the work below to put
        //   the block back into the Marked state.
        //
        // Hence if the block is Marked we need to leave it Marked.
        ASSERT(freeList.allocationWillFail());
        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead.

    ASSERT(!m_newlyAllocated);
    m_newlyAllocated = std::make_unique<WTF::Bitmap<atomsPerBlock>>();

    SetNewlyAllocatedFunctor functor(this);
    forEachCell(functor);

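    // Cells still sitting on the free list were never handed out, so take them back out of the
    // newlyAllocated set. Destructible cells are zapped first, presumably so that a later
    // destructor sweep does not try to run a destructor on a cell that was never allocated.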
    forEachFreeCell(
        freeList,
        [&] (HeapCell* cell) {
            if (m_attributes.destruction == NeedsDestruction)
                cell->zap();
            clearNewlyAllocated(cell);
        });

    m_state = Marked;
}

void MarkedBlock::Handle::lastChanceToFinalize()
{
    m_block->clearMarks();
    m_weakSet.lastChanceToFinalize();

    clearNewlyAllocated();
    sweep();
}

FreeList MarkedBlock::Handle::resumeAllocating()
{
    flipIfNecessary();
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state == Marked);

    if (!m_newlyAllocated) {
        // We didn't have to create a "newly allocated" bitmap. That means we were already Marked
        // when we last stopped allocation, so return an empty free list and stay in the Marked state.
        return FreeList();
    }

    // Re-create our free list from before stopping allocation. Note that this may return an empty
    // free list, in which case the block will still be Marked!
    return sweep(SweepToFreeList);
}

void MarkedBlock::Handle::zap(const FreeList& freeList)
{
    forEachFreeCell(
        freeList,
        [&] (HeapCell* cell) {
            if (m_attributes.destruction == NeedsDestruction)
                cell->zap();
        });
}

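// A FreeList takes one of two shapes, matching the fast and slow paths of specializedSweep()
// above: either a bump range, described by payloadEnd plus a byte count in the remaining field,
// whose cells occupy the last "remaining" bytes ending at payloadEnd; or a singly linked list of
// FreeCells threaded through the dead cells themselves. This helper visits every cell in either
// representation.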
template<typename Func>
void MarkedBlock::Handle::forEachFreeCell(const FreeList& freeList, const Func& func)
{
    if (freeList.remaining) {
        for (unsigned remaining = freeList.remaining; remaining; remaining -= cellSize())
            func(bitwise_cast<HeapCell*>(freeList.payloadEnd - remaining));
    } else {
        for (FreeCell* current = freeList.head; current;) {
            FreeCell* next = current->next;
            func(bitwise_cast<HeapCell*>(current));
            current = next;
        }
    }
}

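// "Flipping" is how mark bits get cleared lazily. Rather than visiting every block when a new
// marking version begins, MarkedSpace just bumps its version number; a block whose cached
// m_version is stale clears its own marks the first time it is next touched (the slow paths
// below) and adopts the current version in clearMarks(). The concurrent variant takes m_lock so
// that threads racing to flip the same block only do the work once.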
void MarkedBlock::flipIfNecessary()
{
    flipIfNecessary(vm()->heap.objectSpace().version());
}

void MarkedBlock::Handle::flipIfNecessary()
{
    block().flipIfNecessary();
}

void MarkedBlock::flipIfNecessarySlow()
{
    ASSERT(needsFlip());
    clearMarks();
}

void MarkedBlock::flipIfNecessaryConcurrentlySlow()
{
    LockHolder locker(m_lock);
    if (needsFlip())
        clearMarks();
}

void MarkedBlock::clearMarks()
{
    m_marks.clearAll();
    clearHasAnyMarked();
    // The state would become Marked at the end of the mark phase anyway. We set
    // it now to avoid an extra pass to do so later.
    handle().m_state = Marked;
    WTF::storeStoreFence();
    m_version = vm()->heap.objectSpace().version();
}

#if !ASSERT_DISABLED
void MarkedBlock::assertFlipped()
{
    ASSERT(m_version == vm()->heap.objectSpace().version());
}
#endif // !ASSERT_DISABLED

bool MarkedBlock::needsFlip()
{
    return needsFlip(vm()->heap.objectSpace().version());
}

bool MarkedBlock::Handle::needsFlip()
{
    return m_block->needsFlip();
}

void MarkedBlock::Handle::willRemoveBlock()
{
    flipIfNecessary();
}

void MarkedBlock::Handle::didConsumeFreeList()
{
    flipIfNecessary();
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state == FreeListed);

    m_state = Allocated;
}

size_t MarkedBlock::markCount()
{
    flipIfNecessary();
    return m_marks.count();
}

bool MarkedBlock::Handle::isEmpty()
{
    flipIfNecessary();
    return m_state == Marked && !block().hasAnyMarked() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty());
}

void MarkedBlock::clearHasAnyMarked()
{
    m_biasedMarkCount = m_markCountBias;
}

void MarkedBlock::noteMarkedSlow()
{
    handle().m_allocator->retire(&handle());
}

} // namespace JSC

namespace WTF {

using namespace JSC;

void printInternal(PrintStream& out, MarkedBlock::BlockState blockState)
{
    switch (blockState) {
    case MarkedBlock::New:
        out.print("New");
        return;
    case MarkedBlock::FreeListed:
        out.print("FreeListed");
        return;
    case MarkedBlock::Allocated:
        out.print("Allocated");
        return;
    case MarkedBlock::Marked:
        out.print("Marked");
        return;
    }
    RELEASE_ASSERT_NOT_REACHED();
}

} // namespace WTF