/*
 * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkStack.h"

#include "ConservativeRoots.h"
#include "Heap.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSObject.h"
#include "Options.h"
#include "ScopeChain.h"
#include "Structure.h"
#include "WriteBarrier.h"
#include <wtf/MainThread.h>

namespace JSC {

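// MarkStackSegmentAllocator hands out fixed-size segments of
// Options::gcMarkStackSegmentSize bytes. Freed segments are recycled through
// a lock-protected, singly-linked free list before falling back to the OS.
// shrinkReserve() returns the whole free list to the OS; it runs from the
// destructor and from MarkStackThreadSharedData::reset().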
MarkStackSegmentAllocator::MarkStackSegmentAllocator()
    : m_nextFreeSegment(0)
{
}

MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
{
    shrinkReserve();
}

MarkStackSegment* MarkStackSegmentAllocator::allocate()
{
    {
        MutexLocker locker(m_lock);
        if (m_nextFreeSegment) {
            MarkStackSegment* result = m_nextFreeSegment;
            m_nextFreeSegment = result->m_previous;
            return result;
        }
    }

    return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Options::gcMarkStackSegmentSize));
}

void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
{
    MutexLocker locker(m_lock);
    segment->m_previous = m_nextFreeSegment;
    m_nextFreeSegment = segment;
}

void MarkStackSegmentAllocator::shrinkReserve()
{
    MarkStackSegment* segments;
    {
        MutexLocker locker(m_lock);
        segments = m_nextFreeSegment;
        m_nextFreeSegment = 0;
    }
    while (segments) {
        MarkStackSegment* toFree = segments;
        segments = segments->m_previous;
        OSAllocator::decommitAndRelease(toFree, Options::gcMarkStackSegmentSize);
    }
}

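// MarkStackArray is a LIFO stack of cells stored in a chain of segments; only
// the top segment is pushed to and popped from, and full segments beneath it
// are linked through m_previous. m_top caches the index into the top segment;
// each segment's own m_top field appears to be maintained only in debug
// builds (!ASSERT_DISABLED), for validation.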
MarkStackArray::MarkStackArray(MarkStackSegmentAllocator& allocator)
    : m_allocator(allocator)
    , m_segmentCapacity(MarkStackSegment::capacityFromSize(Options::gcMarkStackSegmentSize))
    , m_top(0)
    , m_numberOfPreviousSegments(0)
{
    m_topSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    m_topSegment->m_top = 0;
#endif
    m_topSegment->m_previous = 0;
}

MarkStackArray::~MarkStackArray()
{
    ASSERT(!m_topSegment->m_previous);
    m_allocator.release(m_topSegment);
}

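// expand() and refill() are the slow paths of append() and removeLast():
// expand() pushes a fresh segment onto the chain when the top segment fills
// up, and refill() pops an exhausted top segment (releasing it back to the
// allocator) to expose the full segment beneath it.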
void MarkStackArray::expand()
{
    ASSERT(m_topSegment->m_top == m_segmentCapacity);

    m_numberOfPreviousSegments++;

    MarkStackSegment* nextSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    nextSegment->m_top = 0;
#endif
    nextSegment->m_previous = m_topSegment;
    m_topSegment = nextSegment;
    setTopForEmptySegment();
    validatePrevious();
}

bool MarkStackArray::refill()
{
    validatePrevious();
    if (top())
        return true;
    MarkStackSegment* toFree = m_topSegment;
    MarkStackSegment* previous = m_topSegment->m_previous;
    if (!previous)
        return false;
    ASSERT(m_numberOfPreviousSegments);
    m_numberOfPreviousSegments--;
    m_topSegment = previous;
    m_allocator.release(toFree);
    setTopForFullSegment();
    validatePrevious();
    return true;
}

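// Donate work from this (private) mark stack to the other (shared) one: first
// splice over every full segment below the top, in O(1) per segment, then pop
// individual cells off the top segment until only minimumNumberOfCellsToKeep
// remain, or half the cells if this stack holds a lot of work. Returns false,
// donating nothing, if the other stack already holds enough segments;
// otherwise returns true.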
bool MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
{
    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
    validatePrevious();
    other.validatePrevious();

    // Fast check: see if the other mark stack already has enough segments.
    if (other.m_numberOfPreviousSegments + 1 >= Options::maximumNumberOfSharedSegments)
        return false;

    size_t numberOfCellsToKeep = Options::minimumNumberOfCellsToKeep;
    ASSERT(m_top > numberOfCellsToKeep || m_topSegment->m_previous);

    // Looks like we should donate! Give the other mark stack all of our
    // previous segments, and then top it off.
    MarkStackSegment* previous = m_topSegment->m_previous;
    while (previous) {
        ASSERT(m_numberOfPreviousSegments);

        MarkStackSegment* current = previous;
        previous = current->m_previous;

        current->m_previous = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current;

        m_numberOfPreviousSegments--;
        other.m_numberOfPreviousSegments++;
    }
    ASSERT(!m_numberOfPreviousSegments);
    m_topSegment->m_previous = 0;
    validatePrevious();
    other.validatePrevious();

    // Now top off. We want to keep at least numberOfCellsToKeep, but if we
    // really have a lot of work, we give up half.
    if (m_top > numberOfCellsToKeep * 2)
        numberOfCellsToKeep = m_top / 2;
    while (m_top > numberOfCellsToKeep)
        other.append(removeLast());

    return true;
}

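// Stealing is the inverse of donation: prefer to take one whole full segment
// from the other stack in O(1); failing that, pop cells one at a time. The
// per-cell amount is 1/numberOfGCMarkers of the other stack's size, but never
// fewer than minimumNumberOfCellsToKeep.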
void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other)
{
    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
    validatePrevious();
    other.validatePrevious();

    // If other has an entire segment, steal it and return.
    if (other.m_topSegment->m_previous) {
        ASSERT(other.m_topSegment->m_previous->m_top == m_segmentCapacity);

        // First remove a segment from other.
        MarkStackSegment* current = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current->m_previous;
        other.m_numberOfPreviousSegments--;

        ASSERT(!!other.m_numberOfPreviousSegments == !!other.m_topSegment->m_previous);

        // Now add it to this.
        current->m_previous = m_topSegment->m_previous;
        m_topSegment->m_previous = current;
        m_numberOfPreviousSegments++;

        validatePrevious();
        other.validatePrevious();
        return;
    }

    // Otherwise drain 1/Nth of the shared array where N is the number of
    // workers, or Options::minimumNumberOfCellsToKeep, whichever is bigger.
    size_t numberOfCellsToSteal = std::max(static_cast<size_t>(Options::minimumNumberOfCellsToKeep), other.size() / Options::numberOfGCMarkers);
    while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
        append(other.removeLast());
}

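// Each parallel marking thread runs markingThreadMain(): it registers itself
// as a GC thread, wraps this shared data in a SlotVisitor, enters parallel
// mode, and then drains the shared mark stack as a slave until told to exit.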
#if ENABLE(PARALLEL_GC)
void MarkStackThreadSharedData::markingThreadMain()
{
    WTF::registerGCThread();
    SlotVisitor slotVisitor(*this);
    ParallelModeEnabler enabler(slotVisitor);
    slotVisitor.drainFromShared(SlotVisitor::SlaveDrain);
}

void* MarkStackThreadSharedData::markingThreadStartFunc(void* shared)
{
    static_cast<MarkStackThreadSharedData*>(shared)->markingThreadMain();
    return 0;
}
#endif

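// Note that the loop below starts at 1, not 0: with numberOfGCMarkers markers
// configured, only numberOfGCMarkers - 1 helper threads are spawned, because
// the thread that initiates the collection acts as the first marker.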
MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData)
    : m_globalData(globalData)
    , m_sharedMarkStack(m_segmentAllocator)
    , m_numberOfActiveParallelMarkers(0)
    , m_parallelMarkersShouldExit(false)
{
#if ENABLE(PARALLEL_GC)
    for (unsigned i = 1; i < Options::numberOfGCMarkers; ++i) {
        m_markingThreads.append(createThread(markingThreadStartFunc, this, "JavaScriptCore::Marking"));
        ASSERT(m_markingThreads.last());
    }
#endif
}

MarkStackThreadSharedData::~MarkStackThreadSharedData()
{
#if ENABLE(PARALLEL_GC)
    // Destroy our marking threads.
    {
        MutexLocker locker(m_markingLock);
        m_parallelMarkersShouldExit = true;
        m_markingCondition.broadcast();
    }
    for (unsigned i = 0; i < m_markingThreads.size(); ++i)
        waitForThreadCompletion(m_markingThreads[i], 0);
#endif
}

void MarkStackThreadSharedData::reset()
{
    ASSERT(!m_numberOfActiveParallelMarkers);
    ASSERT(!m_parallelMarkersShouldExit);
    ASSERT(m_sharedMarkStack.isEmpty());

#if ENABLE(PARALLEL_GC)
    m_segmentAllocator.shrinkReserve();
    m_opaqueRoots.clear();
#else
    ASSERT(m_opaqueRoots.isEmpty());
#endif

    m_weakReferenceHarvesters.removeAll();
}

void MarkStack::reset()
{
    m_visitCount = 0;
    ASSERT(m_stack.isEmpty());
#if ENABLE(PARALLEL_GC)
    ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
#else
    m_opaqueRoots.clear();
#endif
}

void MarkStack::append(ConservativeRoots& conservativeRoots)
{
    JSCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        internalAppend(roots[i]);
}

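// Visit a cell's children, de-virtualizing the dispatch for the three most
// common cell types (strings, final objects, and arrays) and falling back to
// the method table for everything else, so the hot cases skip the indirect
// call.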
ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
{
#if ENABLE(SIMPLE_HEAP_PROFILING)
    // This is a static function, so the counter must be reached through the
    // visitor rather than as a bare member access.
    visitor.m_visitedTypeCounts.count(cell);
#endif

    ASSERT(Heap::isMarked(cell));

    if (isJSString(cell)) {
        JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSFinalObject(cell)) {
        JSObject::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSArray(cell)) {
        JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
}

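// Slow path of donation. Donating is refused when the shared stack already
// holds more entries than the local one, and sleeping markers are only woken
// once the shared stack crosses sharedStackWakeupThreshold; below that it is
// assumed to be more profitable to scan the cells ourselves later.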
void SlotVisitor::donateSlow()
{
    // Refuse to donate if shared has more entries than I do.
    if (m_shared.m_sharedMarkStack.size() > m_stack.size())
        return;
    MutexLocker locker(m_shared.m_markingLock);
    if (m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack)) {
        // Only wake up threads if the shared stack is big enough; otherwise assume that
        // it's more profitable for us to just scan this ourselves later.
        if (m_shared.m_sharedMarkStack.size() >= Options::sharedStackWakeupThreshold)
            m_shared.m_markingCondition.broadcast();
    }
}

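// Drain this visitor's private stack. In parallel mode the inner loop visits
// at most minimumNumberOfScansBetweenRebalance cells between calls to
// donateKnownParallel(), so an overloaded marker periodically sheds work to
// idle ones instead of draining everything itself.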
void SlotVisitor::drain()
{
    ASSERT(m_isInParallelMode);

#if ENABLE(PARALLEL_GC)
    if (Options::numberOfGCMarkers > 1) {
        while (!m_stack.isEmpty()) {
            m_stack.refill();
            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance; m_stack.canRemoveLast() && countdown--;)
                visitChildren(*this, m_stack.removeLast());
            donateKnownParallel();
        }

        mergeOpaqueRootsIfNecessary();
        return;
    }
#endif

    while (!m_stack.isEmpty()) {
        m_stack.refill();
        while (m_stack.canRemoveLast())
            visitChildren(*this, m_stack.removeLast());
    }
}

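// Cooperative drain loop over the shared mark stack. Termination is detected
// when no marker is active and the shared stack is empty: the master waits on
// the condition for either work or termination, while slaves broadcast when
// they observe termination and otherwise sleep until work arrives or the VM
// shuts down. Each iteration steals some cells under the lock, then drains
// them without it.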
void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers);

    bool shouldBeParallel;

#if ENABLE(PARALLEL_GC)
    shouldBeParallel = Options::numberOfGCMarkers > 1;
#else
    ASSERT(Options::numberOfGCMarkers == 1);
    shouldBeParallel = false;
#endif

    if (!shouldBeParallel) {
        // This call should be a no-op.
        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
        ASSERT(m_stack.isEmpty());
        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
        return;
    }

#if ENABLE(PARALLEL_GC)
    {
        MutexLocker locker(m_shared.m_markingLock);
        m_shared.m_numberOfActiveParallelMarkers++;
    }
    while (true) {
        {
            MutexLocker locker(m_shared.m_markingLock);
            m_shared.m_numberOfActiveParallelMarkers--;

            // How we wait differs depending on drain mode.
            if (sharedDrainMode == MasterDrain) {
                // Wait until either termination is reached, or until there is some work
                // for us to do.
                while (true) {
                    // Did we reach termination?
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                        return;

                    // Is there work to be done?
                    if (!m_shared.m_sharedMarkStack.isEmpty())
                        break;

                    // Otherwise wait.
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                // Did we detect termination? If so, let the master know.
                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                    m_shared.m_markingCondition.broadcast();

                while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit)
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);

                // Is the VM exiting? If so, exit this thread.
                if (m_shared.m_parallelMarkersShouldExit)
                    return;
            }

            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack);
            m_shared.m_numberOfActiveParallelMarkers++;
        }

        drain();
    }
#endif
}

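// Opaque roots accumulate in per-visitor sets during parallel marking; this
// merges the local set into the shared one under its own lock and clears the
// local set.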
void MarkStack::mergeOpaqueRoots()
{
    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
    {
        MutexLocker locker(m_shared.m_opaqueRootsLock);
        HashSet<void*>::iterator begin = m_opaqueRoots.begin();
        HashSet<void*>::iterator end = m_opaqueRoots.end();
        for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
            m_shared.m_opaqueRoots.add(*iter);
    }
    m_opaqueRoots.clear();
}

void SlotVisitor::harvestWeakReferences()
{
    for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
        current->visitWeakReferences(*this);
}

void SlotVisitor::finalizeUnconditionalFinalizers()
{
    while (m_shared.m_unconditionalFinalizers.hasNext())
        m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
}

#if ENABLE(GC_VALIDATION)
void MarkStack::validate(JSCell* cell)
{
    if (!cell)
        CRASH();

    if (!cell->structure())
        CRASH();

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo())
        CRASH();
}
#else
void MarkStack::validate(JSCell*)
{
}
#endif

} // namespace JSC