GC should be 1.7X faster
Source/JavaScriptCore/heap/MarkStack.cpp
/*
 * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkStack.h"

#include "CopiedSpace.h"
#include "CopiedSpaceInlineMethods.h"
#include "ConservativeRoots.h"
#include "Heap.h"
#include "Options.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSObject.h"
#include "ScopeChain.h"
#include "Structure.h"
#include "UString.h"
#include "WriteBarrier.h"
#include <wtf/DataLog.h>
#include <wtf/MainThread.h>

namespace JSC {

MarkStackSegmentAllocator::MarkStackSegmentAllocator()
    : m_nextFreeSegment(0)
{
    m_lock.Init();
}

MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
{
    shrinkReserve();
}

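// Segments are recycled through a spin-lock-protected free list so that repeated
// mark phases do not have to return to the OS for every segment; only when the
// free list is empty do we reserve and commit fresh memory.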
MarkStackSegment* MarkStackSegmentAllocator::allocate()
{
    {
        SpinLockHolder locker(&m_lock);
        if (m_nextFreeSegment) {
            MarkStackSegment* result = m_nextFreeSegment;
            m_nextFreeSegment = result->m_previous;
            return result;
        }
    }

    return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Options::gcMarkStackSegmentSize));
}

void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
{
    SpinLockHolder locker(&m_lock);
    segment->m_previous = m_nextFreeSegment;
    m_nextFreeSegment = segment;
}

void MarkStackSegmentAllocator::shrinkReserve()
{
    MarkStackSegment* segments;
    {
        SpinLockHolder locker(&m_lock);
        segments = m_nextFreeSegment;
        m_nextFreeSegment = 0;
    }
    while (segments) {
        MarkStackSegment* toFree = segments;
        segments = segments->m_previous;
        OSAllocator::decommitAndRelease(toFree, Options::gcMarkStackSegmentSize);
    }
}

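// A MarkStackArray is a segmented stack of cells awaiting a visit: m_topSegment
// is the segment cells are currently pushed to and popped from, with full
// segments chained behind it through their m_previous pointers. m_top indexes
// into the top segment, so whole segments can move between stacks without
// copying individual cells.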
MarkStackArray::MarkStackArray(MarkStackSegmentAllocator& allocator)
    : m_allocator(allocator)
    , m_segmentCapacity(MarkStackSegment::capacityFromSize(Options::gcMarkStackSegmentSize))
    , m_top(0)
    , m_numberOfPreviousSegments(0)
{
    m_topSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    m_topSegment->m_top = 0;
#endif
    m_topSegment->m_previous = 0;
}

MarkStackArray::~MarkStackArray()
{
    ASSERT(!m_topSegment->m_previous);
    m_allocator.release(m_topSegment);
}

void MarkStackArray::expand()
{
    ASSERT(m_topSegment->m_top == m_segmentCapacity);

    m_numberOfPreviousSegments++;

    MarkStackSegment* nextSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    nextSegment->m_top = 0;
#endif
    nextSegment->m_previous = m_topSegment;
    m_topSegment = nextSegment;
    setTopForEmptySegment();
    validatePrevious();
}

bool MarkStackArray::refill()
{
    validatePrevious();
    if (top())
        return true;
    MarkStackSegment* toFree = m_topSegment;
    MarkStackSegment* previous = m_topSegment->m_previous;
    if (!previous)
        return false;
    ASSERT(m_numberOfPreviousSegments);
    m_numberOfPreviousSegments--;
    m_topSegment = previous;
    m_allocator.release(toFree);
    setTopForFullSegment();
    validatePrevious();
    return true;
}

void MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
{
    // Try to donate about 1 / 2 of our cells. To reduce copying costs,
    // we prefer donating whole segments over donating individual cells,
    // even if this skews away from our 1 / 2 target.

    ASSERT(m_segmentCapacity == other.m_segmentCapacity);

    size_t segmentsToDonate = (m_numberOfPreviousSegments + 2 - 1) / 2; // Round up to donate 1 / 1 previous segments.

    if (!segmentsToDonate) {
        size_t cellsToDonate = m_top / 2; // Round down to donate 0 / 1 cells.
        while (cellsToDonate--) {
            ASSERT(m_top);
            other.append(removeLast());
        }
        return;
    }

    validatePrevious();
    other.validatePrevious();

    MarkStackSegment* previous = m_topSegment->m_previous;
    while (segmentsToDonate--) {
        ASSERT(previous);
        ASSERT(m_numberOfPreviousSegments);

        MarkStackSegment* current = previous;
        previous = current->m_previous;

        current->m_previous = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current;

        m_numberOfPreviousSegments--;
        other.m_numberOfPreviousSegments++;
    }
    m_topSegment->m_previous = previous;

    validatePrevious();
    other.validatePrevious();
}

void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other, size_t idleThreadCount)
{
    // Try to steal 1 / Nth of the shared array, where N is the number of idle threads.
    // To reduce copying costs, we prefer stealing a whole segment over stealing
    // individual cells, even if this skews away from our 1 / N target.

    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
    validatePrevious();
    other.validatePrevious();

    // If other has an entire segment, steal it and return.
    if (other.m_topSegment->m_previous) {
        ASSERT(other.m_topSegment->m_previous->m_top == m_segmentCapacity);

        // First remove a segment from other.
        MarkStackSegment* current = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current->m_previous;
        other.m_numberOfPreviousSegments--;

        ASSERT(!!other.m_numberOfPreviousSegments == !!other.m_topSegment->m_previous);

        // Now add it to this.
        current->m_previous = m_topSegment->m_previous;
        m_topSegment->m_previous = current;
        m_numberOfPreviousSegments++;

        validatePrevious();
        other.validatePrevious();
        return;
    }

    size_t numberOfCellsToSteal = (other.size() + idleThreadCount - 1) / idleThreadCount; // Round up to steal 1 / 1.
    while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
        append(other.removeLast());
}

#if ENABLE(PARALLEL_GC)
void MarkStackThreadSharedData::resetChildren()
{
    for (unsigned i = 0; i < m_markingThreadsMarkStack.size(); ++i)
        m_markingThreadsMarkStack[i]->reset();
}

size_t MarkStackThreadSharedData::childVisitCount()
{
    unsigned long result = 0;
    for (unsigned i = 0; i < m_markingThreadsMarkStack.size(); ++i)
        result += m_markingThreadsMarkStack[i]->visitCount();
    return result;
}

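// Body of each helper marking thread: register as a GC thread, then repeatedly
// pull work from the shared mark stack (SlaveDrain mode) until told to exit,
// at which point the thread's SlotVisitor is destroyed.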
void MarkStackThreadSharedData::markingThreadMain(SlotVisitor* slotVisitor)
{
    WTF::registerGCThread();
    {
        ParallelModeEnabler enabler(*slotVisitor);
        slotVisitor->drainFromShared(SlotVisitor::SlaveDrain);
    }
    delete slotVisitor;
}

void MarkStackThreadSharedData::markingThreadStartFunc(void* myVisitor)
{
    SlotVisitor* slotVisitor = static_cast<SlotVisitor*>(myVisitor);

    slotVisitor->sharedData().markingThreadMain(slotVisitor);
}
#endif

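// The shared data owns the segment allocator, the shared mark stack, and, when
// parallel GC is enabled, one helper SlotVisitor and thread for each GC marker
// beyond the first; the main thread's own visitor accounts for the first of
// Options::numberOfGCMarkers markers, which is why the loop below starts at 1.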
MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData)
    : m_globalData(globalData)
    , m_copiedSpace(&globalData->heap.m_storageSpace)
    , m_sharedMarkStack(m_segmentAllocator)
    , m_numberOfActiveParallelMarkers(0)
    , m_parallelMarkersShouldExit(false)
{
#if ENABLE(PARALLEL_GC)
    for (unsigned i = 1; i < Options::numberOfGCMarkers; ++i) {
        SlotVisitor* slotVisitor = new SlotVisitor(*this);
        m_markingThreadsMarkStack.append(slotVisitor);
        m_markingThreads.append(createThread(markingThreadStartFunc, slotVisitor, "JavaScriptCore::Marking"));
        ASSERT(m_markingThreads.last());
    }
#endif
}

MarkStackThreadSharedData::~MarkStackThreadSharedData()
{
#if ENABLE(PARALLEL_GC)
    // Destroy our marking threads.
    {
        MutexLocker locker(m_markingLock);
        m_parallelMarkersShouldExit = true;
        m_markingCondition.broadcast();
    }
    for (unsigned i = 0; i < m_markingThreads.size(); ++i)
        waitForThreadCompletion(m_markingThreads[i]);
#endif
}

void MarkStackThreadSharedData::reset()
{
    ASSERT(!m_numberOfActiveParallelMarkers);
    ASSERT(!m_parallelMarkersShouldExit);
    ASSERT(m_sharedMarkStack.isEmpty());

#if ENABLE(PARALLEL_GC)
    m_segmentAllocator.shrinkReserve();
    m_opaqueRoots.clear();
#else
    ASSERT(m_opaqueRoots.isEmpty());
#endif
    m_weakReferenceHarvesters.removeAll();
}

void MarkStack::reset()
{
    m_visitCount = 0;
    ASSERT(m_stack.isEmpty());
#if ENABLE(PARALLEL_GC)
    ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
#else
    m_opaqueRoots.clear();
#endif
}

void MarkStack::append(ConservativeRoots& conservativeRoots)
{
    JSCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        internalAppend(roots[i]);
}

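// Dispatch for the most common cell types. Strings, final objects, and arrays
// are visited through direct calls that can be inlined; everything else falls
// back to the cell's method table.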
ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
{
#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_visitedTypeCounts.count(cell);
#endif

    ASSERT(Heap::isMarked(cell));

    if (isJSString(cell)) {
        JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSFinalObject(cell)) {
        JSObject::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSArray(cell)) {
        JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
}

void SlotVisitor::donateKnownParallel()
{
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (m_stack.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (m_shared.m_sharedMarkStack.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    MutexTryLocker locker(m_shared.m_markingLock);
    if (!locker.locked())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);

    if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers)
        m_shared.m_markingCondition.broadcast();
}

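// Drain this visitor's private mark stack. In parallel mode, we pause every
// Options::minimumNumberOfScansBetweenRebalance visits to consider donating
// cells to the shared stack; in serial mode, we simply run the stack dry.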
void SlotVisitor::drain()
{
    ASSERT(m_isInParallelMode);

#if ENABLE(PARALLEL_GC)
    if (Options::numberOfGCMarkers > 1) {
        while (!m_stack.isEmpty()) {
            m_stack.refill();
            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance; m_stack.canRemoveLast() && countdown--;)
                visitChildren(*this, m_stack.removeLast());
            donateKnownParallel();
        }

        mergeOpaqueRootsIfNecessary();
        return;
    }
#endif

    while (!m_stack.isEmpty()) {
        m_stack.refill();
        while (m_stack.canRemoveLast())
            visitChildren(*this, m_stack.removeLast());
    }
}

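// Cooperative drain loop shared by the master (MasterDrain) and the helper
// threads (SlaveDrain). Each marker alternates between stealing work from the
// shared mark stack and draining its private stack. Termination is detected
// when the shared stack is empty and no marker is active: the master returns,
// while slaves go back to sleep or exit once m_parallelMarkersShouldExit is set.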
void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers);

    bool shouldBeParallel;

#if ENABLE(PARALLEL_GC)
    shouldBeParallel = Options::numberOfGCMarkers > 1;
#else
    ASSERT(Options::numberOfGCMarkers == 1);
    shouldBeParallel = false;
#endif

    if (!shouldBeParallel) {
        // This call should be a no-op.
        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
        ASSERT(m_stack.isEmpty());
        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
        return;
    }

#if ENABLE(PARALLEL_GC)
    {
        MutexLocker locker(m_shared.m_markingLock);
        m_shared.m_numberOfActiveParallelMarkers++;
    }
    while (true) {
        {
            MutexLocker locker(m_shared.m_markingLock);
            m_shared.m_numberOfActiveParallelMarkers--;

            // How we wait differs depending on drain mode.
            if (sharedDrainMode == MasterDrain) {
                // Wait until either termination is reached, or until there is some work
                // for us to do.
                while (true) {
                    // Did we reach termination?
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
                        // Let any sleeping slaves know it's time for them to give their private CopiedBlocks back
                        m_shared.m_markingCondition.broadcast();
                        return;
                    }

                    // Is there work to be done?
                    if (!m_shared.m_sharedMarkStack.isEmpty())
                        break;

                    // Otherwise wait.
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                // Did we detect termination? If so, let the master know.
                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                    m_shared.m_markingCondition.broadcast();

                while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit) {
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                        doneCopying();
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
                }

                // Is the VM exiting? If so, exit this thread.
                if (m_shared.m_parallelMarkersShouldExit) {
                    doneCopying();
                    return;
                }
            }

            size_t idleThreadCount = Options::numberOfGCMarkers - m_shared.m_numberOfActiveParallelMarkers;
            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount);
            m_shared.m_numberOfActiveParallelMarkers++;
        }

        drain();
    }
#endif
}

void MarkStack::mergeOpaqueRoots()
{
    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
    {
        MutexLocker locker(m_shared.m_opaqueRootsLock);
        HashSet<void*>::iterator begin = m_opaqueRoots.begin();
        HashSet<void*>::iterator end = m_opaqueRoots.end();
        for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
            m_shared.m_opaqueRoots.add(*iter);
    }
    m_opaqueRoots.clear();
}

void SlotVisitor::startCopying()
{
    ASSERT(!m_copyBlock);
    if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
        CRASH();
}

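// Reserve space in the copied (storage) space for a backing store about to be
// evacuated. Oversize and pinned storage stays where it is (we return 0);
// otherwise we bump-allocate out of this visitor's borrowed CopiedBlock,
// trading the block in for a fresh one whenever it fills up.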
void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes)
{
    if (CopiedSpace::isOversize(bytes)) {
        m_shared.m_copiedSpace->pin(CopiedSpace::oversizeBlockFor(ptr));
        return 0;
    }

    if (m_shared.m_copiedSpace->isPinned(ptr))
        return 0;

    // The only time it's possible to have a null copy block is if we have just started copying.
    if (!m_copyBlock)
        startCopying();

    if (!CopiedSpace::fitsInBlock(m_copyBlock, bytes)) {
        // We don't need to lock across these two calls because the master thread
        // won't call doneCopying() while this thread is considered active.
        m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
        if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
            CRASH();
    }
    return CopiedSpace::allocateFromBlock(m_copyBlock, bytes);
}

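// Copy a JSValue-bearing backing store into freshly allocated copied space,
// visiting each live value as it is copied. If no new space could be obtained
// (oversize or pinned storage), the values are simply appended to the mark
// stack in place.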
void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsigned length)
{
    void* oldPtr = *ptr;
    void* newPtr = allocateNewSpace(oldPtr, bytes);
    if (newPtr) {
        size_t jsValuesOffset = static_cast<size_t>(reinterpret_cast<char*>(values) - static_cast<char*>(oldPtr));

        JSValue* newValues = reinterpret_cast_ptr<JSValue*>(static_cast<char*>(newPtr) + jsValuesOffset);
        for (unsigned i = 0; i < length; i++) {
            JSValue& value = values[i];
            newValues[i] = value;
            if (!value)
                continue;
            internalAppend(value);
        }

        memcpy(newPtr, oldPtr, jsValuesOffset);
        *ptr = newPtr;
    } else
        append(values, length);
}

void SlotVisitor::doneCopying()
{
    if (!m_copyBlock)
        return;

    m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);

    m_copyBlock = 0;
}

void SlotVisitor::harvestWeakReferences()
{
    for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
        current->visitWeakReferences(*this);
}

void SlotVisitor::finalizeUnconditionalFinalizers()
{
    while (m_shared.m_unconditionalFinalizers.hasNext())
        m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
}

#if ENABLE(GC_VALIDATION)
void MarkStack::validate(JSCell* cell)
{
    if (!cell) {
        dataLog("cell is NULL\n");
        CRASH();
    }

    if (!cell->structure()) {
        dataLog("cell at %p has a null structure\n", cell);
        CRASH();
    }

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
        const char* parentClassName = 0;
        const char* ourClassName = 0;
        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
            parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
        if (cell->structure()->JSCell::classInfo())
            ourClassName = cell->structure()->JSCell::classInfo()->className;
        dataLog("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
                cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
        CRASH();
    }
}
#else
void MarkStack::validate(JSCell*)
{
}
#endif

} // namespace JSC