Convert GCThreadSharedData over to STL threading primitives
author    andersca@apple.com <andersca@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Mon, 20 Jan 2014 17:10:36 +0000 (17:10 +0000)
committer andersca@apple.com <andersca@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
          Mon, 20 Jan 2014 17:10:36 +0000 (17:10 +0000)
https://bugs.webkit.org/show_bug.cgi?id=127256

Reviewed by Andreas Kling.

* heap/GCThread.cpp:
(JSC::GCThread::waitForNextPhase):
(JSC::GCThread::gcThreadMain):
* heap/GCThreadSharedData.cpp:
(JSC::GCThreadSharedData::GCThreadSharedData):
(JSC::GCThreadSharedData::~GCThreadSharedData):
(JSC::GCThreadSharedData::startNextPhase):
(JSC::GCThreadSharedData::endCurrentPhase):
(JSC::GCThreadSharedData::didStartMarking):
(JSC::GCThreadSharedData::didFinishMarking):
* heap/GCThreadSharedData.h:
* heap/SlotVisitor.cpp:
(JSC::SlotVisitor::donateKnownParallel):
(JSC::SlotVisitor::drainFromShared):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@162352 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/heap/GCThread.cpp
Source/JavaScriptCore/heap/GCThreadSharedData.cpp
Source/JavaScriptCore/heap/GCThreadSharedData.h
Source/JavaScriptCore/heap/SlotVisitor.cpp
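
The whole patch applies one mechanical mapping: WTF::Mutex becomes std::mutex,
WTF::ThreadCondition becomes std::condition_variable, MutexLocker becomes
std::lock_guard where the scope only notifies or std::unique_lock where it
waits, and each hand-written "while (!condition) wait()" loop becomes the
predicate overload of std::condition_variable::wait. A minimal standalone
sketch of that pattern, not part of the patch (Shared, ready, waiter, and
notifier are illustrative names only):

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    struct Shared {
        std::mutex mutex;
        std::condition_variable condition;
        bool ready { false }; // protected by mutex
    };

    static void waiter(Shared& shared)
    {
        std::unique_lock<std::mutex> lock(shared.mutex);
        // Equivalent to: while (!shared.ready) shared.condition.wait(lock);
        // The predicate is rechecked after every wakeup, so spurious wakeups
        // are handled exactly as the old explicit loop handled them.
        shared.condition.wait(lock, [&] { return shared.ready; });
    }

    static void notifier(Shared& shared)
    {
        {
            std::lock_guard<std::mutex> lock(shared.mutex);
            shared.ready = true;
        }
        shared.condition.notify_all();
    }

    int main()
    {
        Shared shared;
        std::thread thread(waiter, std::ref(shared));
        notifier(shared);
        thread.join();
    }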

diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index ca484fe..aa4ab9f 100644
@@ -1,3 +1,25 @@
+2014-01-19  Anders Carlsson  <andersca@apple.com>
+
+        Convert GCThreadSharedData over to STL threading primitives
+        https://bugs.webkit.org/show_bug.cgi?id=127256
+
+        Reviewed by Andreas Kling.
+
+        * heap/GCThread.cpp:
+        (JSC::GCThread::waitForNextPhase):
+        (JSC::GCThread::gcThreadMain):
+        * heap/GCThreadSharedData.cpp:
+        (JSC::GCThreadSharedData::GCThreadSharedData):
+        (JSC::GCThreadSharedData::~GCThreadSharedData):
+        (JSC::GCThreadSharedData::startNextPhase):
+        (JSC::GCThreadSharedData::endCurrentPhase):
+        (JSC::GCThreadSharedData::didStartMarking):
+        (JSC::GCThreadSharedData::didFinishMarking):
+        * heap/GCThreadSharedData.h:
+        * heap/SlotVisitor.cpp:
+        (JSC::SlotVisitor::donateKnownParallel):
+        (JSC::SlotVisitor::drainFromShared):
+
 2014-01-18  Andreas Kling  <akling@apple.com>
 
         CodeBlock: Size m_callLinkInfos and m_byValInfos to fit earlier.
diff --git a/Source/JavaScriptCore/heap/GCThread.cpp b/Source/JavaScriptCore/heap/GCThread.cpp
index aa868f1..50f02ce 100644
@@ -69,16 +69,14 @@ CopyVisitor* GCThread::copyVisitor()
 
 GCPhase GCThread::waitForNextPhase()
 {
-    MutexLocker locker(m_shared.m_phaseLock);
-    while (m_shared.m_gcThreadsShouldWait)
-        m_shared.m_phaseCondition.wait(m_shared.m_phaseLock);
+    std::unique_lock<std::mutex> lock(m_shared.m_phaseMutex);
+    m_shared.m_phaseConditionVariable.wait(lock, [this] { return !m_shared.m_gcThreadsShouldWait; });
 
     m_shared.m_numberOfActiveGCThreads--;
     if (!m_shared.m_numberOfActiveGCThreads)
-        m_shared.m_activityCondition.signal();
+        m_shared.m_activityConditionVariable.notify_one();
 
-    while (m_shared.m_currentPhase == NoPhase)
-        m_shared.m_phaseCondition.wait(m_shared.m_phaseLock);
+    m_shared.m_phaseConditionVariable.wait(lock, [this] { return m_shared.m_currentPhase != NoPhase; });
     m_shared.m_numberOfActiveGCThreads++;
     return m_shared.m_currentPhase;
 }
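
Note the shape of waitForNextPhase after the rewrite: two condition variables
(m_phaseConditionVariable and m_activityConditionVariable) share the single
m_phaseMutex, and the last worker to go idle uses notify_one because only the
main thread ever sleeps on the activity condition. A standalone sketch of that
shape, with illustrative names rather than the patch's code:

    #include <condition_variable>
    #include <mutex>

    // Two condition variables guarding distinct predicates over state
    // protected by one mutex, as in waitForNextPhase.
    struct Coordinator {
        std::mutex mutex;
        std::condition_variable workAvailable; // workers sleep here
        std::condition_variable allIdle;       // the coordinator sleeps here
        unsigned activeWorkers { 0 };
        bool hasWork { false };
    };

    void registerWorker(Coordinator& c)
    {
        std::lock_guard<std::mutex> lock(c.mutex);
        ++c.activeWorkers;
    }

    void workerWait(Coordinator& c)
    {
        std::unique_lock<std::mutex> lock(c.mutex);
        if (!--c.activeWorkers)
            c.allIdle.notify_one(); // only one thread ever waits on allIdle
        c.workAvailable.wait(lock, [&] { return c.hasWork; });
        ++c.activeWorkers;
    }
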
@@ -92,7 +90,7 @@ void GCThread::gcThreadMain()
     // Wait for the main thread to finish creating and initializing us. The main thread grabs this lock before 
     // creating this thread. We aren't guaranteed to have a valid threadID until the main thread releases this lock.
     {
-        MutexLocker locker(m_shared.m_phaseLock);
+        std::lock_guard<std::mutex> lock(m_shared.m_phaseMutex);
     }
     {
         ParallelModeEnabler enabler(*m_slotVisitor);
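
The empty block above is not dead code: the main thread holds m_phaseMutex
across thread creation, so a std::lock_guard that is constructed and
immediately destroyed serves as a startup barrier. A runnable sketch of the
idiom under assumed names (setupMutex, childMain are illustrative):

    #include <mutex>
    #include <thread>

    std::mutex setupMutex; // held by the parent during child setup

    void childMain()
    {
        {
            // Blocks until the parent releases setupMutex; taking and
            // immediately dropping the lock is the whole synchronization.
            std::lock_guard<std::mutex> lock(setupMutex);
        }
        // Safe to use state the parent initialized before unlocking.
    }

    int main()
    {
        std::unique_lock<std::mutex> lock(setupMutex);
        std::thread child(childMain);
        // ... initialize state the child reads ...
        lock.unlock();
        child.join();
    }
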
diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.cpp b/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
index 7d1d7b4..09143a1 100644
@@ -83,7 +83,7 @@ GCThreadSharedData::GCThreadSharedData(VM* vm)
     m_copyLock.Init();
 #if ENABLE(PARALLEL_GC)
     // Grab the lock so the new GC threads can be properly initialized before they start running.
-    MutexLocker locker(m_phaseLock);
+    std::unique_lock<std::mutex> lock(m_phaseMutex);
     for (unsigned i = 1; i < Options::numberOfGCMarkers(); ++i) {
         m_numberOfActiveGCThreads++;
         SlotVisitor* slotVisitor = new SlotVisitor(*this);
@@ -95,8 +95,7 @@ GCThreadSharedData::GCThreadSharedData(VM* vm)
     }
 
     // Wait for all the GCThreads to get to the right place.
-    while (m_numberOfActiveGCThreads)
-        m_activityCondition.wait(m_phaseLock);
+    m_activityConditionVariable.wait(lock, [this] { return !m_numberOfActiveGCThreads; });
 #endif
 }
 
@@ -105,13 +104,13 @@ GCThreadSharedData::~GCThreadSharedData()
 #if ENABLE(PARALLEL_GC)    
     // Destroy our marking threads.
     {
-        MutexLocker markingLocker(m_markingLock);
-        MutexLocker phaseLocker(m_phaseLock);
+        std::lock_guard<std::mutex> markingLock(m_markingMutex);
+        std::lock_guard<std::mutex> phaseLock(m_phaseMutex);
         ASSERT(m_currentPhase == NoPhase);
         m_parallelMarkersShouldExit = true;
         m_gcThreadsShouldWait = false;
         m_currentPhase = Exit;
-        m_phaseCondition.broadcast();
+        m_phaseConditionVariable.notify_all();
     }
     for (unsigned i = 0; i < m_gcThreads.size(); ++i) {
         waitForThreadCompletion(m_gcThreads[i]->threadID());
@@ -139,28 +138,27 @@ void GCThreadSharedData::reset()
 
 void GCThreadSharedData::startNextPhase(GCPhase phase)
 {
-    MutexLocker phaseLocker(m_phaseLock);
+    std::lock_guard<std::mutex> lock(m_phaseMutex);
     ASSERT(!m_gcThreadsShouldWait);
     ASSERT(m_currentPhase == NoPhase);
     m_gcThreadsShouldWait = true;
     m_currentPhase = phase;
-    m_phaseCondition.broadcast();
+    m_phaseConditionVariable.notify_all();
 }
 
 void GCThreadSharedData::endCurrentPhase()
 {
     ASSERT(m_gcThreadsShouldWait);
-    MutexLocker locker(m_phaseLock);
+    std::unique_lock<std::mutex> lock(m_phaseMutex);
     m_currentPhase = NoPhase;
     m_gcThreadsShouldWait = false;
-    m_phaseCondition.broadcast();
-    while (m_numberOfActiveGCThreads)
-        m_activityCondition.wait(m_phaseLock);
+    m_phaseConditionVariable.notify_all();
+    m_activityConditionVariable.wait(lock, [this] { return !m_numberOfActiveGCThreads; });
 }
 
 void GCThreadSharedData::didStartMarking()
 {
-    MutexLocker markingLocker(m_markingLock);
+    std::lock_guard<std::mutex> lock(m_markingMutex);
     m_parallelMarkersShouldExit = false;
     startNextPhase(Mark);
 }
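
The two functions above use different lock types deliberately: startNextPhase
only notifies, so the cheaper std::lock_guard suffices, while endCurrentPhase
must block on a condition, and std::condition_variable::wait accepts only a
std::unique_lock<std::mutex>. A sketch of the endCurrentPhase-style
rendezvous, with assumed names (Phase, endPhase are illustrative):

    #include <condition_variable>
    #include <mutex>

    struct Phase {
        std::mutex mutex;
        std::condition_variable phaseChanged;
        std::condition_variable allIdle;
        unsigned activeWorkers { 0 };
        bool inPhase { false };
    };

    void endPhase(Phase& p)
    {
        std::unique_lock<std::mutex> lock(p.mutex); // unique_lock: we wait below
        p.inPhase = false;
        // Wake every worker, then sleep until the active count drains to zero.
        p.phaseChanged.notify_all();
        p.allIdle.wait(lock, [&] { return !p.activeWorkers; });
    }
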
@@ -168,9 +166,9 @@ void GCThreadSharedData::didStartMarking()
 void GCThreadSharedData::didFinishMarking()
 {
     {
-        MutexLocker markingLocker(m_markingLock);
+        std::lock_guard<std::mutex> lock(m_markingMutex);
         m_parallelMarkersShouldExit = true;
-        m_markingCondition.broadcast();
+        m_markingConditionVariable.notify_all();
     }
 
     ASSERT(m_currentPhase == Mark);
diff --git a/Source/JavaScriptCore/heap/GCThreadSharedData.h b/Source/JavaScriptCore/heap/GCThreadSharedData.h
index ff3dd99..915c2c9 100644
@@ -31,9 +31,9 @@
 #include "MarkedBlock.h"
 #include "UnconditionalFinalizer.h"
 #include "WeakReferenceHarvester.h"
+#include <condition_variable>
 #include <wtf/HashSet.h>
 #include <wtf/TCSpinLock.h>
-#include <wtf/Threading.h>
 #include <wtf/Vector.h>
 
 namespace JSC {
@@ -86,8 +86,8 @@ private:
 
     Vector<GCThread*> m_gcThreads;
 
-    Mutex m_markingLock;
-    ThreadCondition m_markingCondition;
+    std::mutex m_markingMutex;
+    std::condition_variable m_markingConditionVariable;
     MarkStackArray m_sharedMarkStack;
     unsigned m_numberOfActiveParallelMarkers;
     bool m_parallelMarkersShouldExit;
@@ -100,9 +100,9 @@ private:
     size_t m_copyIndex;
     static const size_t s_blockFragmentLength = 32;
 
-    Mutex m_phaseLock;
-    ThreadCondition m_phaseCondition;
-    ThreadCondition m_activityCondition;
+    std::mutex m_phaseMutex;
+    std::condition_variable m_phaseConditionVariable;
+    std::condition_variable m_activityConditionVariable;
     unsigned m_numberOfActiveGCThreads;
     bool m_gcThreadsShouldWait;
     GCPhase m_currentPhase;
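
With the members converted, <wtf/Threading.h> gives way to <condition_variable>
(which also brings in the callers' std::mutex uses on the supported toolchains).
The reason both lock types appear in the .cpp files is an STL constraint:
std::condition_variable::wait takes a std::unique_lock<std::mutex>, which can be
unlocked and relocked around the sleep, whereas a std::lock_guard cannot be
passed to wait at all. A minimal illustration (the names are assumptions, not
the patch's code):

    #include <condition_variable>
    #include <mutex>

    std::mutex mutex;
    std::condition_variable condition;
    bool done = false;

    void notifyOnly()
    {
        std::lock_guard<std::mutex> lock(mutex); // never waits, so lock_guard suffices
        done = true;
        condition.notify_all();
    }

    void waitForDone()
    {
        // wait() requires std::unique_lock so it can release the mutex
        // while sleeping and reacquire it before returning.
        std::unique_lock<std::mutex> lock(mutex);
        condition.wait(lock, [] { return done; });
    }
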
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.cpp b/Source/JavaScriptCore/heap/SlotVisitor.cpp
index 05fb001..4fd0da7 100644
@@ -118,15 +118,15 @@ void SlotVisitor::donateKnownParallel()
 
     // If we're contending on the lock, be conservative and assume that another
     // thread is already donating.
-    MutexTryLocker locker(m_shared.m_markingLock);
-    if (!locker.locked())
+    std::unique_lock<std::mutex> lock(m_shared.m_markingMutex, std::try_to_lock);
+    if (!lock.owns_lock())
         return;
 
     // Otherwise, assume that a thread will go idle soon, and donate.
     m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);
 
     if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers())
-        m_shared.m_markingCondition.broadcast();
+        m_shared.m_markingConditionVariable.notify_all();
 }
 
 void SlotVisitor::drain()
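
Here std::unique_lock constructed with std::try_to_lock replaces WTF's
MutexTryLocker: construction attempts the lock without blocking, and
owns_lock() reports whether it succeeded. A standalone sketch of the idiom
(tryDonate and markingMutex are illustrative names):

    #include <mutex>

    std::mutex markingMutex;

    bool tryDonate()
    {
        // Non-blocking acquisition: if another thread holds the mutex,
        // owns_lock() is false and we skip donating this time around.
        std::unique_lock<std::mutex> lock(markingMutex, std::try_to_lock);
        if (!lock.owns_lock())
            return false;
        // ... donate work under the lock ...
        return true;
    }
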
@@ -181,12 +181,12 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
     
 #if ENABLE(PARALLEL_GC)
     {
-        MutexLocker locker(m_shared.m_markingLock);
+        std::lock_guard<std::mutex> lock(m_shared.m_markingMutex);
         m_shared.m_numberOfActiveParallelMarkers++;
     }
     while (true) {
         {
-            MutexLocker locker(m_shared.m_markingLock);
+            std::unique_lock<std::mutex> lock(m_shared.m_markingMutex);
             m_shared.m_numberOfActiveParallelMarkers--;
 
             // How we wait differs depending on drain mode.
@@ -197,7 +197,7 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
                     // Did we reach termination?
                     if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
                         // Let any sleeping slaves know it's time for them to return;
-                        m_shared.m_markingCondition.broadcast();
+                        m_shared.m_markingConditionVariable.notify_all();
                         return;
                     }
                     
@@ -206,17 +206,16 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
                         break;
                     
                     // Otherwise wait.
-                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
+                    m_shared.m_markingConditionVariable.wait(lock);
                 }
             } else {
                 ASSERT(sharedDrainMode == SlaveDrain);
                 
                 // Did we detect termination? If so, let the master know.
                 if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
-                    m_shared.m_markingCondition.broadcast();
-                
-                while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit)
-                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
+                    m_shared.m_markingConditionVariable.notify_all();
+
+                m_shared.m_markingConditionVariable.wait(lock, [this] { return !m_shared.m_sharedMarkStack.isEmpty() || m_shared.m_parallelMarkersShouldExit; });
                 
                 // Is the current phase done? If so, return from this function.
                 if (m_shared.m_parallelMarkersShouldExit)
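
In the slave-drain wait above, the lambda captures this and is evaluated with
m_markingMutex held, both on entry and after every notification, so reading
m_sharedMarkStack and m_parallelMarkersShouldExit inside it is race-free. A
sketch of the same compound predicate under assumed names (Drainer and the
deque stand in for the real mark stack):

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    struct Drainer {
        std::mutex mutex;
        std::condition_variable condition;
        std::deque<int> sharedStack; // stand-in for the shared mark stack
        bool shouldExit { false };

        void slaveWait()
        {
            std::unique_lock<std::mutex> lock(mutex);
            // Runs under the lock on entry and after every wakeup,
            // replacing the old explicit while loop.
            condition.wait(lock, [this] { return !sharedStack.empty() || shouldExit; });
        }
    };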