/*
 * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkStack.h"

#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlineMethods.h"
#include "Heap.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSObject.h"
#include "Options.h"
#include "ScopeChain.h"
#include "Structure.h"
#include "WriteBarrier.h"
#include <wtf/MainThread.h>

namespace JSC {

MarkStackSegmentAllocator::MarkStackSegmentAllocator()
    : m_nextFreeSegment(0)
{
}

MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
{
    shrinkReserve();
}

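// Reuse a segment from the free list if one is available; otherwise reserve and
// commit a fresh segment from the OS.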
MarkStackSegment* MarkStackSegmentAllocator::allocate()
{
    {
        MutexLocker locker(m_lock);
        if (m_nextFreeSegment) {
            MarkStackSegment* result = m_nextFreeSegment;
            m_nextFreeSegment = result->m_previous;
            return result;
        }
    }

    return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Options::gcMarkStackSegmentSize));
}

void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
{
    MutexLocker locker(m_lock);
    segment->m_previous = m_nextFreeSegment;
    m_nextFreeSegment = segment;
}

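// Detach the entire free list under the lock, then return each cached segment to
// the OS outside the lock.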
void MarkStackSegmentAllocator::shrinkReserve()
{
    MarkStackSegment* segments;
    {
        MutexLocker locker(m_lock);
        segments = m_nextFreeSegment;
        m_nextFreeSegment = 0;
    }
    while (segments) {
        MarkStackSegment* toFree = segments;
        segments = segments->m_previous;
        OSAllocator::decommitAndRelease(toFree, Options::gcMarkStackSegmentSize);
    }
}

MarkStackArray::MarkStackArray(MarkStackSegmentAllocator& allocator)
    : m_allocator(allocator)
    , m_segmentCapacity(MarkStackSegment::capacityFromSize(Options::gcMarkStackSegmentSize))
    , m_top(0)
    , m_numberOfPreviousSegments(0)
{
    m_topSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    m_topSegment->m_top = 0;
#endif
    m_topSegment->m_previous = 0;
}

MarkStackArray::~MarkStackArray()
{
    ASSERT(!m_topSegment->m_previous);
    m_allocator.release(m_topSegment);
}

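// Called when the top segment is full: push a fresh, empty segment on top of it.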
void MarkStackArray::expand()
{
    ASSERT(m_topSegment->m_top == m_segmentCapacity);

    m_numberOfPreviousSegments++;

    MarkStackSegment* nextSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    nextSegment->m_top = 0;
#endif
    nextSegment->m_previous = m_topSegment;
    m_topSegment = nextSegment;
    setTopForEmptySegment();
    validatePrevious();
}

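// If the top segment is exhausted, discard it and resume popping from the previous
// (full) segment. Returns false only when the whole stack is empty.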
bool MarkStackArray::refill()
{
    validatePrevious();
    if (top())
        return true;
    MarkStackSegment* toFree = m_topSegment;
    MarkStackSegment* previous = m_topSegment->m_previous;
    if (!previous)
        return false;
    ASSERT(m_numberOfPreviousSegments);
    m_numberOfPreviousSegments--;
    m_topSegment = previous;
    m_allocator.release(toFree);
    setTopForFullSegment();
    validatePrevious();
    return true;
}

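// Move work to another (typically shared) mark stack: hand over all of our previous
// segments, then top it off cell by cell. Returns false if the other stack already
// has enough segments.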
bool MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
{
    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
    validatePrevious();
    other.validatePrevious();

    // Fast check: see if the other mark stack already has enough segments.
    if (other.m_numberOfPreviousSegments + 1 >= Options::maximumNumberOfSharedSegments)
        return false;

    size_t numberOfCellsToKeep = Options::minimumNumberOfCellsToKeep;
    ASSERT(m_top > numberOfCellsToKeep || m_topSegment->m_previous);

    // Looks like we should donate! Give the other mark stack all of our
    // previous segments, and then top it off.
    MarkStackSegment* previous = m_topSegment->m_previous;
    while (previous) {
        ASSERT(m_numberOfPreviousSegments);

        MarkStackSegment* current = previous;
        previous = current->m_previous;

        current->m_previous = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current;

        m_numberOfPreviousSegments--;
        other.m_numberOfPreviousSegments++;
    }
    ASSERT(!m_numberOfPreviousSegments);
    m_topSegment->m_previous = 0;
    validatePrevious();
    other.validatePrevious();

    // Now top off. We want to keep at least numberOfCellsToKeep cells for ourselves,
    // but if we really have a lot of work, we give up half.
    if (m_top > numberOfCellsToKeep * 2)
        numberOfCellsToKeep = m_top / 2;
    while (m_top > numberOfCellsToKeep)
        other.append(removeLast());

    return true;
}

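// Take work from another mark stack: steal a whole full segment if one is available,
// otherwise drain a share of its cells one at a time.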
void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other)
{
    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
    validatePrevious();
    other.validatePrevious();

    // If other has an entire segment, steal it and return.
    if (other.m_topSegment->m_previous) {
        ASSERT(other.m_topSegment->m_previous->m_top == m_segmentCapacity);

        // First remove a segment from other.
        MarkStackSegment* current = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current->m_previous;
        other.m_numberOfPreviousSegments--;

        ASSERT(!!other.m_numberOfPreviousSegments == !!other.m_topSegment->m_previous);

        // Now add it to this.
        current->m_previous = m_topSegment->m_previous;
        m_topSegment->m_previous = current;
        m_numberOfPreviousSegments++;

        validatePrevious();
        other.validatePrevious();
        return;
    }

    // Otherwise drain 1/Nth of the shared array where N is the number of
    // workers, or Options::minimumNumberOfCellsToKeep, whichever is bigger.
    size_t numberOfCellsToSteal = std::max(static_cast<size_t>(Options::minimumNumberOfCellsToKeep), other.size() / Options::numberOfGCMarkers);
    while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
        append(other.removeLast());
}

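// Entry point for the helper marking threads: each one drains the shared mark stack
// in slave mode until it is told to exit.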
#if ENABLE(PARALLEL_GC)
void MarkStackThreadSharedData::markingThreadMain()
{
    WTF::registerGCThread();
    SlotVisitor slotVisitor(*this);
    ParallelModeEnabler enabler(slotVisitor);
    slotVisitor.drainFromShared(SlotVisitor::SlaveDrain);
}

void MarkStackThreadSharedData::markingThreadStartFunc(void* shared)
{
    static_cast<MarkStackThreadSharedData*>(shared)->markingThreadMain();
}
#endif

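// State shared by all markers. With parallel GC enabled, spawn
// Options::numberOfGCMarkers - 1 helper threads; the GC's own thread acts as the
// remaining marker.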
MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData)
    : m_globalData(globalData)
    , m_copiedSpace(&globalData->heap.m_storageSpace)
    , m_sharedMarkStack(m_segmentAllocator)
    , m_numberOfActiveParallelMarkers(0)
    , m_parallelMarkersShouldExit(false)
{
#if ENABLE(PARALLEL_GC)
    for (unsigned i = 1; i < Options::numberOfGCMarkers; ++i) {
        m_markingThreads.append(createThread(markingThreadStartFunc, this, "JavaScriptCore::Marking"));
        ASSERT(m_markingThreads.last());
    }
#endif
}

MarkStackThreadSharedData::~MarkStackThreadSharedData()
{
#if ENABLE(PARALLEL_GC)
    // Destroy our marking threads.
    {
        MutexLocker locker(m_markingLock);
        m_parallelMarkersShouldExit = true;
        m_markingCondition.broadcast();
    }
    for (unsigned i = 0; i < m_markingThreads.size(); ++i)
        waitForThreadCompletion(m_markingThreads[i]);
#endif
}

void MarkStackThreadSharedData::reset()
{
    ASSERT(!m_numberOfActiveParallelMarkers);
    ASSERT(!m_parallelMarkersShouldExit);
    ASSERT(m_sharedMarkStack.isEmpty());

#if ENABLE(PARALLEL_GC)
    m_segmentAllocator.shrinkReserve();
    m_opaqueRoots.clear();
#else
    ASSERT(m_opaqueRoots.isEmpty());
#endif

    m_weakReferenceHarvesters.removeAll();
}

void MarkStack::reset()
{
    m_visitCount = 0;
    ASSERT(m_stack.isEmpty());
#if ENABLE(PARALLEL_GC)
    ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
#else
    m_opaqueRoots.clear();
#endif
}

void MarkStack::append(ConservativeRoots& conservativeRoots)
{
    JSCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        internalAppend(roots[i]);
}

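// Visit a cell's children, using direct calls for the most common cell types
// (strings, final objects, arrays) and falling back to the method table otherwise.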
ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
{
#if ENABLE(SIMPLE_HEAP_PROFILING)
    visitor.m_visitedTypeCounts.count(cell);
#endif

    ASSERT(Heap::isMarked(cell));

    if (isJSString(cell)) {
        JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSFinalObject(cell)) {
        JSObject::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSArray(cell)) {
        JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
}

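// Slow path of donation: give some of our cells to the shared mark stack, and wake
// other markers only once the shared stack has reached the wakeup threshold.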
void SlotVisitor::donateSlow()
{
    // Refuse to donate if shared has more entries than I do.
    if (m_shared.m_sharedMarkStack.size() > m_stack.size())
        return;
    MutexLocker locker(m_shared.m_markingLock);
    if (m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack)) {
        // Only wake up threads if the shared stack is big enough; otherwise assume that
        // it's more profitable for us to just scan this ourselves later.
        if (m_shared.m_sharedMarkStack.size() >= Options::sharedStackWakeupThreshold)
            m_shared.m_markingCondition.broadcast();
    }
}

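// Drain this visitor's private mark stack. In parallel mode, periodically donate
// work to the shared stack between batches of visits; otherwise just visit until
// the stack is empty.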
void SlotVisitor::drain()
{
    ASSERT(m_isInParallelMode);

#if ENABLE(PARALLEL_GC)
    if (Options::numberOfGCMarkers > 1) {
        while (!m_stack.isEmpty()) {
            m_stack.refill();
            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance; m_stack.canRemoveLast() && countdown--;)
                visitChildren(*this, m_stack.removeLast());
            donateKnownParallel();
        }

        mergeOpaqueRootsIfNecessary();
        return;
    }
#endif

    while (!m_stack.isEmpty()) {
        m_stack.refill();
        while (m_stack.canRemoveLast())
            visitChildren(*this, m_stack.removeLast());
    }
}

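// Repeatedly pull work from the shared mark stack and drain it. MasterDrain waits
// for termination (no active markers and an empty shared stack); SlaveDrain also
// honors the exit flag set when the shared data is destroyed.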
void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers);

    bool shouldBeParallel;

#if ENABLE(PARALLEL_GC)
    shouldBeParallel = Options::numberOfGCMarkers > 1;
#else
    ASSERT(Options::numberOfGCMarkers == 1);
    shouldBeParallel = false;
#endif

    if (!shouldBeParallel) {
        // This call should be a no-op.
        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
        ASSERT(m_stack.isEmpty());
        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
        return;
    }

#if ENABLE(PARALLEL_GC)
    {
        MutexLocker locker(m_shared.m_markingLock);
        m_shared.m_numberOfActiveParallelMarkers++;
    }
    while (true) {
        {
            MutexLocker locker(m_shared.m_markingLock);
            m_shared.m_numberOfActiveParallelMarkers--;

            // How we wait differs depending on drain mode.
            if (sharedDrainMode == MasterDrain) {
                // Wait until either termination is reached, or until there is some work
                // for us to do.
                while (true) {
                    // Did we reach termination?
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
                        // Let any sleeping slaves know it's time for them to give their private CopiedBlocks back.
                        m_shared.m_markingCondition.broadcast();
                        return;
                    }

                    // Is there work to be done?
                    if (!m_shared.m_sharedMarkStack.isEmpty())
                        break;

                    // Otherwise wait.
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                // Did we detect termination? If so, let the master know.
                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                    m_shared.m_markingCondition.broadcast();

                while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit) {
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                        doneCopying();
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
                }

                // Is the VM exiting? If so, exit this thread.
                if (m_shared.m_parallelMarkersShouldExit) {
                    doneCopying();
                    return;
                }
            }

            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack);
            m_shared.m_numberOfActiveParallelMarkers++;
        }

        drain();
    }
#endif
}

void MarkStack::mergeOpaqueRoots()
{
    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
    {
        MutexLocker locker(m_shared.m_opaqueRootsLock);
        HashSet<void*>::iterator begin = m_opaqueRoots.begin();
        HashSet<void*>::iterator end = m_opaqueRoots.end();
        for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
            m_shared.m_opaqueRoots.add(*iter);
    }
    m_opaqueRoots.clear();
}

void SlotVisitor::startCopying()
{
    ASSERT(!m_copyBlock);
    if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
        CRASH();
}

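// Try to reserve room in the copied space for the given allocation. Oversize
// allocations are pinned and left in place, as are allocations in already-pinned
// blocks; both cases return 0 so the caller skips the copy.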
void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes)
{
    if (CopiedSpace::isOversize(bytes)) {
        m_shared.m_copiedSpace->pin(CopiedSpace::oversizeBlockFor(ptr));
        return 0;
    }

    if (m_shared.m_copiedSpace->isPinned(ptr))
        return 0;

    // The only time it's possible to have a null copy block is if we have just started copying.
    if (!m_copyBlock)
        startCopying();

    if (!CopiedSpace::fitsInBlock(m_copyBlock, bytes)) {
        // We don't need to lock across these two calls because the master thread won't
        // call doneCopying() because this thread is considered active.
        m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
        if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
            CRASH();
    }
    return CopiedSpace::allocateFromBlock(m_copyBlock, bytes);
}

void SlotVisitor::copy(void** ptr, size_t bytes)
{
    void* newPtr = 0;
    if (!(newPtr = allocateNewSpace(*ptr, bytes)))
        return;

    memcpy(newPtr, *ptr, bytes);
    *ptr = newPtr;
}

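// Copy a backing store that contains JSValues, appending each contained value to
// the mark stack as it is copied. If no new space could be allocated, just visit
// the values in place.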
void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsigned length)
{
    void* oldPtr = *ptr;
    void* newPtr = allocateNewSpace(oldPtr, bytes);
    if (newPtr) {
        size_t jsValuesOffset = static_cast<size_t>(reinterpret_cast<char*>(values) - static_cast<char*>(oldPtr));

        JSValue* newValues = reinterpret_cast<JSValue*>(static_cast<char*>(newPtr) + jsValuesOffset);
        for (unsigned i = 0; i < length; i++) {
            JSValue& value = values[i];
            newValues[i] = value;
            if (!value)
                continue;
            internalAppend(value);
        }

        memcpy(newPtr, oldPtr, jsValuesOffset);
        *ptr = newPtr;
    } else
        append(values, length);
}

void SlotVisitor::doneCopying()
{
    if (!m_copyBlock)
        return;

    m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);

    m_copyBlock = 0;
}

void SlotVisitor::harvestWeakReferences()
{
    for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
        current->visitWeakReferences(*this);
}

void SlotVisitor::finalizeUnconditionalFinalizers()
{
    while (m_shared.m_unconditionalFinalizers.hasNext())
        m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
}

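// With GC_VALIDATION enabled, crash early on cells that are null, have a null
// structure, or have an inconsistent structure chain; otherwise validation is a no-op.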
#if ENABLE(GC_VALIDATION)
void MarkStack::validate(JSCell* cell)
{
    if (!cell)
        CRASH();

    if (!cell->structure())
        CRASH();

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo())
        CRASH();
}
#else
void MarkStack::validate(JSCell*)
{
}
#endif

} // namespace JSC