Source/JavaScriptCore/heap/MarkStack.cpp
/*
 * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkStack.h"
#include "MarkStackInlineMethods.h"

#include "CopiedSpace.h"
#include "CopiedSpaceInlineMethods.h"
#include "ConservativeRoots.h"
#include "Heap.h"
#include "Options.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSObject.h"
#include "ScopeChain.h"
#include "SlotVisitorInlineMethods.h"
#include "Structure.h"
#include "WriteBarrier.h"
#include <wtf/Atomics.h>
#include <wtf/DataLog.h>
#include <wtf/MainThread.h>

namespace JSC {

MarkStackSegmentAllocator::MarkStackSegmentAllocator()
    : m_nextFreeSegment(0)
{
    m_lock.Init();
}

MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
{
    shrinkReserve();
}

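// Reuse a segment from the spin-lock-protected free list when one is
// available; otherwise reserve and commit a fresh segment directly from the OS.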
MarkStackSegment* MarkStackSegmentAllocator::allocate()
{
    {
        SpinLockHolder locker(&m_lock);
        if (m_nextFreeSegment) {
            MarkStackSegment* result = m_nextFreeSegment;
            m_nextFreeSegment = result->m_previous;
            return result;
        }
    }

    return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Options::gcMarkStackSegmentSize()));
}

void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
{
    SpinLockHolder locker(&m_lock);
    segment->m_previous = m_nextFreeSegment;
    m_nextFreeSegment = segment;
}

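// Return every cached segment to the OS. The free list is detached while
// holding the lock, then decommitted and released outside of it.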
void MarkStackSegmentAllocator::shrinkReserve()
{
    MarkStackSegment* segments;
    {
        SpinLockHolder locker(&m_lock);
        segments = m_nextFreeSegment;
        m_nextFreeSegment = 0;
    }
    while (segments) {
        MarkStackSegment* toFree = segments;
        segments = segments->m_previous;
        OSAllocator::decommitAndRelease(toFree, Options::gcMarkStackSegmentSize());
    }
}

MarkStackArray::MarkStackArray(MarkStackSegmentAllocator& allocator)
    : m_allocator(allocator)
    , m_segmentCapacity(MarkStackSegment::capacityFromSize(Options::gcMarkStackSegmentSize()))
    , m_top(0)
    , m_numberOfPreviousSegments(0)
{
    m_topSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    m_topSegment->m_top = 0;
#endif
    m_topSegment->m_previous = 0;
}

MarkStackArray::~MarkStackArray()
{
    ASSERT(!m_topSegment->m_previous);
    m_allocator.release(m_topSegment);
}

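// Called when the top segment is full: push a fresh, empty segment onto the
// segment chain and make it the new top.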
void MarkStackArray::expand()
{
    ASSERT(m_topSegment->m_top == m_segmentCapacity);

    m_numberOfPreviousSegments++;

    MarkStackSegment* nextSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    nextSegment->m_top = 0;
#endif
    nextSegment->m_previous = m_topSegment;
    m_topSegment = nextSegment;
    setTopForEmptySegment();
    validatePrevious();
}

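// When the top segment is exhausted, release it and make the next (full)
// segment the new top. Returns false only when the whole stack is empty.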
bool MarkStackArray::refill()
{
    validatePrevious();
    if (top())
        return true;
    MarkStackSegment* toFree = m_topSegment;
    MarkStackSegment* previous = m_topSegment->m_previous;
    if (!previous)
        return false;
    ASSERT(m_numberOfPreviousSegments);
    m_numberOfPreviousSegments--;
    m_topSegment = previous;
    m_allocator.release(toFree);
    setTopForFullSegment();
    validatePrevious();
    return true;
}

void MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
{
    // Try to donate about 1 / 2 of our cells. To reduce copying costs,
    // we prefer donating whole segments over donating individual cells,
    // even if this skews away from our 1 / 2 target.

    ASSERT(m_segmentCapacity == other.m_segmentCapacity);

    size_t segmentsToDonate = (m_numberOfPreviousSegments + 2 - 1) / 2; // Round up to donate 1 / 1 previous segments.

    if (!segmentsToDonate) {
        size_t cellsToDonate = m_top / 2; // Round down to donate 0 / 1 cells.
        while (cellsToDonate--) {
            ASSERT(m_top);
            other.append(removeLast());
        }
        return;
    }

    validatePrevious();
    other.validatePrevious();

    MarkStackSegment* previous = m_topSegment->m_previous;
    while (segmentsToDonate--) {
        ASSERT(previous);
        ASSERT(m_numberOfPreviousSegments);

        MarkStackSegment* current = previous;
        previous = current->m_previous;

        current->m_previous = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current;

        m_numberOfPreviousSegments--;
        other.m_numberOfPreviousSegments++;
    }
    m_topSegment->m_previous = previous;

    validatePrevious();
    other.validatePrevious();
}

void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other, size_t idleThreadCount)
{
    // Try to steal 1 / Nth of the shared array, where N is the number of idle threads.
    // To reduce copying costs, we prefer stealing a whole segment over stealing
    // individual cells, even if this skews away from our 1 / N target.

    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
    validatePrevious();
    other.validatePrevious();

    // If other has an entire segment, steal it and return.
    if (other.m_topSegment->m_previous) {
        ASSERT(other.m_topSegment->m_previous->m_top == m_segmentCapacity);

        // First remove a segment from other.
        MarkStackSegment* current = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current->m_previous;
        other.m_numberOfPreviousSegments--;

        ASSERT(!!other.m_numberOfPreviousSegments == !!other.m_topSegment->m_previous);

        // Now add it to this.
        current->m_previous = m_topSegment->m_previous;
        m_topSegment->m_previous = current;
        m_numberOfPreviousSegments++;

        validatePrevious();
        other.validatePrevious();
        return;
    }

    size_t numberOfCellsToSteal = (other.size() + idleThreadCount - 1) / idleThreadCount; // Round up to steal 1 / 1.
    while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
        append(other.removeLast());
}

MarkStack::MarkStack(GCThreadSharedData& shared)
    : m_stack(shared.m_segmentAllocator)
#if !ASSERT_DISABLED
    , m_isCheckingForDefaultMarkViolation(false)
    , m_isDraining(false)
#endif
    , m_visitCount(0)
    , m_isInParallelMode(false)
    , m_shared(shared)
    , m_shouldHashConst(false)
{
}

MarkStack::~MarkStack()
{
    ASSERT(m_stack.isEmpty());
}

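// Decide once per collection whether hash-consting of strings is worthwhile,
// and propagate that decision to every parallel marking thread's mark stack.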
void MarkStack::setup()
{
    m_shared.m_shouldHashConst = m_shared.m_globalData->haveEnoughNewStringsToHashConst();
    m_shouldHashConst = m_shared.m_shouldHashConst;
#if ENABLE(PARALLEL_GC)
    for (unsigned i = 0; i < m_shared.m_markingThreadsMarkStack.size(); ++i)
        m_shared.m_markingThreadsMarkStack[i]->m_shouldHashConst = m_shared.m_shouldHashConst;
#endif
}

void MarkStack::reset()
{
    m_visitCount = 0;
    ASSERT(m_stack.isEmpty());
#if ENABLE(PARALLEL_GC)
    ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
#else
    m_opaqueRoots.clear();
#endif
    if (m_shouldHashConst) {
        m_uniqueStrings.clear();
        m_shouldHashConst = false;
    }
}

void MarkStack::append(ConservativeRoots& conservativeRoots)
{
    JSCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        internalAppend(roots[i]);
}

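// Dispatch to the cell's visitChildren implementation, special-casing the most
// common cell types (strings, final objects, arrays) to avoid the virtual call
// through the method table.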
ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell)
{
#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_visitedTypeCounts.count(cell);
#endif

    ASSERT(Heap::isMarked(cell));

    if (isJSString(cell)) {
        JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSFinalObject(cell)) {
        JSFinalObject::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (isJSArray(cell)) {
        JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
}

void SlotVisitor::donateKnownParallel()
{
    // NOTE: Because we re-try often, we can afford to be conservative, and
    // assume that donating is not profitable.

    // Avoid locking when a thread reaches a dead end in the object graph.
    if (m_stack.size() < 2)
        return;

    // If there's already some shared work queued up, be conservative and assume
    // that donating more is not profitable.
    if (m_shared.m_sharedMarkStack.size())
        return;

    // If we're contending on the lock, be conservative and assume that another
    // thread is already donating.
    MutexTryLocker locker(m_shared.m_markingLock);
    if (!locker.locked())
        return;

    // Otherwise, assume that a thread will go idle soon, and donate.
    m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);

    if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers())
        m_shared.m_markingCondition.broadcast();
}

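// Visit cells from the local mark stack until it is empty. In parallel mode,
// pause after at most minimumNumberOfScansBetweenRebalance() visits to consider
// donating work to idle threads via donateKnownParallel().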
void SlotVisitor::drain()
{
    ASSERT(m_isInParallelMode);

#if ENABLE(PARALLEL_GC)
    if (Options::numberOfGCMarkers() > 1) {
        while (!m_stack.isEmpty()) {
            m_stack.refill();
            for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;)
                visitChildren(*this, m_stack.removeLast());
            donateKnownParallel();
        }

        mergeOpaqueRootsIfNecessary();
        return;
    }
#endif

    while (!m_stack.isEmpty()) {
        m_stack.refill();
        while (m_stack.canRemoveLast())
            visitChildren(*this, m_stack.removeLast());
    }
}

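// Repeatedly pull work from the shared mark stack and drain it locally.
// Termination is detected when no markers are active and the shared stack is
// empty; the master then broadcasts so waiting slaves can finish up.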
void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Options::numberOfGCMarkers());

    bool shouldBeParallel;

#if ENABLE(PARALLEL_GC)
    shouldBeParallel = Options::numberOfGCMarkers() > 1;
#else
    ASSERT(Options::numberOfGCMarkers() == 1);
    shouldBeParallel = false;
#endif

    if (!shouldBeParallel) {
        // This call should be a no-op.
        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
        ASSERT(m_stack.isEmpty());
        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
        return;
    }

#if ENABLE(PARALLEL_GC)
    {
        MutexLocker locker(m_shared.m_markingLock);
        m_shared.m_numberOfActiveParallelMarkers++;
    }
    while (true) {
        {
            MutexLocker locker(m_shared.m_markingLock);
            m_shared.m_numberOfActiveParallelMarkers--;

            // How we wait differs depending on drain mode.
            if (sharedDrainMode == MasterDrain) {
                // Wait until either termination is reached, or until there is some work
                // for us to do.
                while (true) {
                    // Did we reach termination?
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
                        // Let any sleeping slaves know it's time for them to give their private CopiedBlocks back
                        m_shared.m_markingCondition.broadcast();
                        return;
                    }

                    // Is there work to be done?
                    if (!m_shared.m_sharedMarkStack.isEmpty())
                        break;

                    // Otherwise wait.
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                // Did we detect termination? If so, let the master know.
                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                    m_shared.m_markingCondition.broadcast();

                while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit) {
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                        doneCopying();
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
                }

                // Is the VM exiting? If so, exit this thread.
                if (m_shared.m_parallelMarkersShouldExit) {
                    doneCopying();
                    return;
                }
            }

            size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers;
            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount);
            m_shared.m_numberOfActiveParallelMarkers++;
        }

        drain();
    }
#endif
}

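// Fold this visitor's locally collected opaque roots into the shared set,
// taking the shared lock once for the whole batch.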
void MarkStack::mergeOpaqueRoots()
{
    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
    {
        MutexLocker locker(m_shared.m_opaqueRootsLock);
        HashSet<void*>::iterator begin = m_opaqueRoots.begin();
        HashSet<void*>::iterator end = m_opaqueRoots.end();
        for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
            m_shared.m_opaqueRoots.add(*iter);
    }
    m_opaqueRoots.clear();
}

void SlotVisitor::startCopying()
{
    ASSERT(!m_copiedAllocator.isValid());
}

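// Slow path for copied-space allocation: hand the current block back to
// CopiedSpace and grab a fresh block for the copying phase, which is expected
// to satisfy the request (the allocation is asserted to succeed).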
void* SlotVisitor::allocateNewSpaceSlow(size_t bytes)
{
    m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock());
    m_copiedAllocator.setCurrentBlock(m_shared.m_copiedSpace->allocateBlockForCopyingPhase());

    void* result = 0;
    CheckedBoolean didSucceed = m_copiedAllocator.tryAllocate(bytes, &result);
    ASSERT(didSucceed);
    return result;
}

void* SlotVisitor::allocateNewSpaceOrPin(void* ptr, size_t bytes)
{
    if (!checkIfShouldCopyAndPinOtherwise(ptr, bytes))
        return 0;

    return allocateNewSpace(bytes);
}

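// Try to take the per-string hash-const lock. Under PARALLEL_GC this is a
// compare-and-swap on the string's flags word; with a single marker a plain
// read-modify-write is sufficient.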
ALWAYS_INLINE bool JSString::tryHashConstLock()
{
#if ENABLE(PARALLEL_GC)
    unsigned currentFlags = m_flags;

    if (currentFlags & HashConstLock)
        return false;

    unsigned newFlags = currentFlags | HashConstLock;

    if (!WTF::weakCompareAndSwap(&m_flags, currentFlags, newFlags))
        return false;

    WTF::memoryBarrierAfterLock();
    return true;
#else
    if (isHashConstSingleton())
        return false;

    m_flags |= HashConstLock;

    return true;
#endif
}

ALWAYS_INLINE void JSString::releaseHashConstLock()
{
#if ENABLE(PARALLEL_GC)
    WTF::memoryBarrierBeforeUnlock();
#endif
    m_flags &= ~HashConstLock;
}

ALWAYS_INLINE bool JSString::shouldTryHashConst()
{
    return ((length() > 1) && !isRope() && !isHashConstSingleton());
}

ALWAYS_INLINE void MarkStack::internalAppend(JSValue* slot)
{
    // This internalAppend is only intended for visits to object and array backing stores,
    // as it can change the JSValue pointed to by the argument when the original JSValue
    // is a string that contains the same contents as another string.

    ASSERT(slot);
    JSValue value = *slot;
    ASSERT(value);
    if (!value.isCell())
        return;

    JSCell* cell = value.asCell();
    if (!cell)
        return;

    if (m_shouldHashConst && cell->isString()) {
        JSString* string = jsCast<JSString*>(cell);
        if (string->shouldTryHashConst() && string->tryHashConstLock()) {
            UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value);
            if (addResult.isNewEntry)
                string->setHashConstSingleton();
            else {
                JSValue existingJSValue = addResult.iterator->second;
                if (value != existingJSValue)
                    jsCast<JSString*>(existingJSValue.asCell())->clearHashConstSingleton();
                *slot = existingJSValue;
                string->releaseHashConstLock();
                return;
            }
            string->releaseHashConstLock();
        }
    }

    internalAppend(cell);
}

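// Copy an object or array backing store into newly allocated copied space (or
// pin it in place) and visit the JSValues it contains. When the copy succeeds,
// the values are visited through the new buffer so that
// internalAppend(JSValue*) can rewrite hash-consted strings in place.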
void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsigned length)
{
    void* oldPtr = *ptr;
    void* newPtr = allocateNewSpaceOrPin(oldPtr, bytes);
    if (newPtr) {
        size_t jsValuesOffset = static_cast<size_t>(reinterpret_cast<char*>(values) - static_cast<char*>(oldPtr));

        JSValue* newValues = reinterpret_cast_ptr<JSValue*>(static_cast<char*>(newPtr) + jsValuesOffset);
        for (unsigned i = 0; i < length; i++) {
            JSValue& value = values[i];
            newValues[i] = value;
            if (!value)
                continue;
            internalAppend(&newValues[i]);
        }

        memcpy(newPtr, oldPtr, jsValuesOffset);
        *ptr = newPtr;
    } else
        append(values, length);
}

void SlotVisitor::doneCopying()
{
    if (!m_copiedAllocator.isValid())
        return;

    m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock());
}

void SlotVisitor::harvestWeakReferences()
{
    for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
        current->visitWeakReferences(*this);
}

void SlotVisitor::finalizeUnconditionalFinalizers()
{
    while (m_shared.m_unconditionalFinalizers.hasNext())
        m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
}

#if ENABLE(GC_VALIDATION)
void MarkStack::validate(JSCell* cell)
{
    if (!cell) {
        dataLog("cell is NULL\n");
        CRASH();
    }

    if (!cell->structure()) {
        dataLog("cell at %p has a null structure\n", cell);
        CRASH();
    }

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo()) {
        const char* parentClassName = 0;
        const char* ourClassName = 0;
        if (cell->structure()->structure() && cell->structure()->structure()->JSCell::classInfo())
            parentClassName = cell->structure()->structure()->JSCell::classInfo()->className;
        if (cell->structure()->JSCell::classInfo())
            ourClassName = cell->structure()->JSCell::classInfo()->className;
        dataLog("parent structure (%p <%s>) of cell at %p doesn't match cell's structure (%p <%s>)\n",
                cell->structure()->structure(), parentClassName, cell, cell->structure(), ourClassName);
        CRASH();
    }
}
#else
void MarkStack::validate(JSCell*)
{
}
#endif

} // namespace JSC