Use WTF::Lock and WTF::Condition instead of WTF::Mutex, WTF::ThreadCondition, std::mutex, and std::condition_variable
author    fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 14 Aug 2015 06:46:46 +0000 (06:46 +0000)
committer fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 14 Aug 2015 06:46:46 +0000 (06:46 +0000)
https://bugs.webkit.org/show_bug.cgi?id=147999

Reviewed by Geoffrey Garen.

Source/JavaScriptCore:

* API/JSVirtualMachine.mm:
(initWrapperCache):
(+[JSVMWrapperCache addWrapper:forJSContextGroupRef:]):
(+[JSVMWrapperCache wrapperForJSContextGroupRef:]):
(wrapperCacheMutex): Deleted.
* bytecode/SamplingTool.cpp:
(JSC::SamplingTool::doRun):
(JSC::SamplingTool::notifyOfScope):
* bytecode/SamplingTool.h:
* dfg/DFGThreadData.h:
* dfg/DFGWorklist.cpp:
(JSC::DFG::Worklist::~Worklist):
(JSC::DFG::Worklist::isActiveForVM):
(JSC::DFG::Worklist::enqueue):
(JSC::DFG::Worklist::compilationState):
(JSC::DFG::Worklist::waitUntilAllPlansForVMAreReady):
(JSC::DFG::Worklist::removeAllReadyPlansForVM):
(JSC::DFG::Worklist::completeAllReadyPlansForVM):
(JSC::DFG::Worklist::visitWeakReferences):
(JSC::DFG::Worklist::removeDeadPlans):
(JSC::DFG::Worklist::queueLength):
(JSC::DFG::Worklist::dump):
(JSC::DFG::Worklist::runThread):
* dfg/DFGWorklist.h:
* disassembler/Disassembler.cpp:
* heap/CopiedSpace.cpp:
(JSC::CopiedSpace::doneFillingBlock):
(JSC::CopiedSpace::doneCopying):
* heap/CopiedSpace.h:
* heap/CopiedSpaceInlines.h:
(JSC::CopiedSpace::recycleBorrowedBlock):
(JSC::CopiedSpace::allocateBlockForCopyingPhase):
* heap/GCThread.cpp:
(JSC::GCThread::waitForNextPhase):
(JSC::GCThread::gcThreadMain):
* heap/GCThreadSharedData.cpp:
(JSC::GCThreadSharedData::GCThreadSharedData):
(JSC::GCThreadSharedData::~GCThreadSharedData):
(JSC::GCThreadSharedData::startNextPhase):
(JSC::GCThreadSharedData::endCurrentPhase):
(JSC::GCThreadSharedData::didStartMarking):
(JSC::GCThreadSharedData::didFinishMarking):
* heap/GCThreadSharedData.h:
* heap/HeapTimer.h:
* heap/MachineStackMarker.cpp:
(JSC::ActiveMachineThreadsManager::Locker::Locker):
(JSC::ActiveMachineThreadsManager::add):
(JSC::ActiveMachineThreadsManager::remove):
(JSC::ActiveMachineThreadsManager::ActiveMachineThreadsManager):
(JSC::MachineThreads::~MachineThreads):
(JSC::MachineThreads::addCurrentThread):
(JSC::MachineThreads::removeThreadIfFound):
(JSC::MachineThreads::tryCopyOtherThreadStack):
(JSC::MachineThreads::tryCopyOtherThreadStacks):
(JSC::MachineThreads::gatherConservativeRoots):
* heap/MachineStackMarker.h:
* heap/SlotVisitor.cpp:
(JSC::SlotVisitor::donateKnownParallel):
(JSC::SlotVisitor::drain):
(JSC::SlotVisitor::drainFromShared):
(JSC::SlotVisitor::mergeOpaqueRoots):
* heap/SlotVisitorInlines.h:
(JSC::SlotVisitor::containsOpaqueRootTriState):
* inspector/remote/RemoteInspectorDebuggableConnection.h:
* inspector/remote/RemoteInspectorDebuggableConnection.mm:
(Inspector::RemoteInspectorHandleRunSourceGlobal):
(Inspector::RemoteInspectorQueueTaskOnGlobalQueue):
(Inspector::RemoteInspectorInitializeGlobalQueue):
(Inspector::RemoteInspectorHandleRunSourceWithInfo):
(Inspector::RemoteInspectorDebuggableConnection::setup):
(Inspector::RemoteInspectorDebuggableConnection::closeFromDebuggable):
(Inspector::RemoteInspectorDebuggableConnection::close):
(Inspector::RemoteInspectorDebuggableConnection::sendMessageToBackend):
(Inspector::RemoteInspectorDebuggableConnection::queueTaskOnPrivateRunLoop):
* interpreter/JSStack.cpp:
(JSC::JSStack::JSStack):
(JSC::JSStack::releaseExcessCapacity):
(JSC::JSStack::addToCommittedByteCount):
(JSC::JSStack::committedByteCount):
(JSC::stackStatisticsMutex): Deleted.
(JSC::JSStack::initializeThreading): Deleted.
* interpreter/JSStack.h:
(JSC::JSStack::gatherConservativeRoots):
(JSC::JSStack::sanitizeStack):
(JSC::JSStack::size):
(JSC::JSStack::initializeThreading): Deleted.
* jit/ExecutableAllocator.cpp:
(JSC::DemandExecutableAllocator::DemandExecutableAllocator):
(JSC::DemandExecutableAllocator::~DemandExecutableAllocator):
(JSC::DemandExecutableAllocator::bytesAllocatedByAllAllocators):
(JSC::DemandExecutableAllocator::bytesCommittedByAllocactors):
(JSC::DemandExecutableAllocator::dumpProfileFromAllAllocators):
(JSC::DemandExecutableAllocator::allocators):
(JSC::DemandExecutableAllocator::allocatorsMutex):
* jit/JITThunks.cpp:
(JSC::JITThunks::ctiStub):
* jit/JITThunks.h:
* profiler/ProfilerDatabase.cpp:
(JSC::Profiler::Database::ensureBytecodesFor):
(JSC::Profiler::Database::notifyDestruction):
* profiler/ProfilerDatabase.h:
* runtime/InitializeThreading.cpp:
(JSC::initializeThreading):
* runtime/JSLock.cpp:
(JSC::GlobalJSLock::GlobalJSLock):
(JSC::GlobalJSLock::~GlobalJSLock):
(JSC::JSLockHolder::JSLockHolder):
(JSC::GlobalJSLock::initialize): Deleted.
* runtime/JSLock.h:
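
All of the JavaScriptCore changes follow the same mechanical substitution: Mutex becomes Lock, MutexLocker becomes LockHolder, ThreadCondition becomes Condition with signal()/broadcast() renamed to notifyOne()/notifyAll(), and file-scope std::mutex statics become StaticLock. A rough sketch of the resulting idiom (ExampleQueue and its members are made-up names for illustration, not code from this patch):

    #include <wtf/Condition.h>
    #include <wtf/Deque.h>
    #include <wtf/Lock.h>

    // Hypothetical producer/consumer queue showing the new locking idiom.
    class ExampleQueue {
    public:
        void enqueue(int value)
        {
            LockHolder locker(m_lock);        // was: MutexLocker locker(m_lock);
            m_items.append(value);
            m_condition.notifyOne();          // was: signal(); broadcast() becomes notifyAll().
        }

        int dequeue()
        {
            LockHolder locker(m_lock);
            while (m_items.isEmpty())
                m_condition.wait(m_lock);     // Condition::wait() takes the Lock directly.
            return m_items.takeFirst();
        }

    private:
        Lock m_lock;                          // was: Mutex m_lock;
        Condition m_condition;                // was: ThreadCondition m_condition;
        Deque<int> m_items;
    };

    // File-scope std::mutex statics become StaticLock, which has no dynamic
    // initializer, so the NeverDestroyed<std::mutex> accessor functions go away:
    static StaticLock exampleStaticLock;
    // ... std::lock_guard<StaticLock> guard(exampleStaticLock);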

Source/WTF:

* wtf/Condition.h: "using WTF::Condition".
* wtf/Lock.h:
(WTF::LockBase::lock):
(WTF::LockBase::tryLock): Add tryLock() because it turns out that we use it sometimes.
(WTF::LockBase::try_lock): unique_lock needs this.
(WTF::LockBase::unlock):
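
The new tryLock()/try_lock() pair is what lets std::unique_lock's try_to_lock constructor work against a WTF lock, as SlotVisitor::donateKnownParallel now does. A minimal sketch of both spellings, with made-up names (exampleLock, tryWithWTFSpelling, tryWithStdAdaptor):

    #include <mutex>
    #include <wtf/Lock.h>

    static StaticLock exampleLock;

    bool tryWithWTFSpelling()
    {
        if (!exampleLock.tryLock())
            return false;              // Contended; assume another thread is doing the work.
        // ... short critical section ...
        exampleLock.unlock();
        return true;
    }

    bool tryWithStdAdaptor()
    {
        // unique_lock's try_to_lock constructor calls try_lock() for us.
        std::unique_lock<StaticLock> lock(exampleLock, std::try_to_lock);
        if (!lock.owns_lock())
            return false;
        // ... critical section; released when 'lock' goes out of scope ...
        return true;
    }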

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@188444 268f45cc-cd09-0410-ab3c-d52691b4dbfc

34 files changed:
Source/JavaScriptCore/API/JSVirtualMachine.mm
Source/JavaScriptCore/ChangeLog
Source/JavaScriptCore/bytecode/SamplingTool.cpp
Source/JavaScriptCore/bytecode/SamplingTool.h
Source/JavaScriptCore/dfg/DFGThreadData.h
Source/JavaScriptCore/dfg/DFGWorklist.cpp
Source/JavaScriptCore/dfg/DFGWorklist.h
Source/JavaScriptCore/disassembler/Disassembler.cpp
Source/JavaScriptCore/heap/CopiedSpace.cpp
Source/JavaScriptCore/heap/CopiedSpace.h
Source/JavaScriptCore/heap/CopiedSpaceInlines.h
Source/JavaScriptCore/heap/GCThread.cpp
Source/JavaScriptCore/heap/GCThreadSharedData.cpp
Source/JavaScriptCore/heap/GCThreadSharedData.h
Source/JavaScriptCore/heap/HeapTimer.h
Source/JavaScriptCore/heap/MachineStackMarker.cpp
Source/JavaScriptCore/heap/MachineStackMarker.h
Source/JavaScriptCore/heap/SlotVisitor.cpp
Source/JavaScriptCore/heap/SlotVisitorInlines.h
Source/JavaScriptCore/inspector/remote/RemoteInspectorDebuggableConnection.h
Source/JavaScriptCore/inspector/remote/RemoteInspectorDebuggableConnection.mm
Source/JavaScriptCore/interpreter/JSStack.cpp
Source/JavaScriptCore/interpreter/JSStack.h
Source/JavaScriptCore/jit/ExecutableAllocator.cpp
Source/JavaScriptCore/jit/JITThunks.cpp
Source/JavaScriptCore/jit/JITThunks.h
Source/JavaScriptCore/profiler/ProfilerDatabase.cpp
Source/JavaScriptCore/profiler/ProfilerDatabase.h
Source/JavaScriptCore/runtime/InitializeThreading.cpp
Source/JavaScriptCore/runtime/JSLock.cpp
Source/JavaScriptCore/runtime/JSLock.h
Source/WTF/ChangeLog
Source/WTF/wtf/Condition.h
Source/WTF/wtf/Lock.h

index d4995ad..9aa92d4 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 #import "JSWrapperMap.h"
 #import "SlotVisitorInlines.h"
 #import <mutex>
+#import <wtf/Lock.h>
 #import <wtf/NeverDestroyed.h>
 #import <wtf/spi/cocoa/NSMapTableSPI.h>
 
 static NSMapTable *globalWrapperCache = 0;
 
-static std::mutex& wrapperCacheMutex()
-{
-    static NeverDestroyed<std::mutex> mutex;
-
-    return mutex;
-}
+static StaticLock wrapperCacheMutex;
 
 static void initWrapperCache()
 {
@@ -72,13 +68,13 @@ static NSMapTable *wrapperCache()
 
 + (void)addWrapper:(JSVirtualMachine *)wrapper forJSContextGroupRef:(JSContextGroupRef)group
 {
-    std::lock_guard<std::mutex> lock(wrapperCacheMutex());
+    std::lock_guard<StaticLock> lock(wrapperCacheMutex);
     NSMapInsert(wrapperCache(), group, wrapper);
 }
 
 + (JSVirtualMachine *)wrapperForJSContextGroupRef:(JSContextGroupRef)group
 {
-    std::lock_guard<std::mutex> lock(wrapperCacheMutex());
+    std::lock_guard<StaticLock> lock(wrapperCacheMutex);
     return static_cast<JSVirtualMachine *>(NSMapGet(wrapperCache(), group));
 }
 
index efd55bf..99adbf8 100644
@@ -1,3 +1,120 @@
+2015-08-13  Filip Pizlo  <fpizlo@apple.com>
+
+        Use WTF::Lock and WTF::Condition instead of WTF::Mutex, WTF::ThreadCondition, std::mutex, and std::condition_variable
+        https://bugs.webkit.org/show_bug.cgi?id=147999
+
+        Reviewed by Geoffrey Garen.
+
+        * API/JSVirtualMachine.mm:
+        (initWrapperCache):
+        (+[JSVMWrapperCache addWrapper:forJSContextGroupRef:]):
+        (+[JSVMWrapperCache wrapperForJSContextGroupRef:]):
+        (wrapperCacheMutex): Deleted.
+        * bytecode/SamplingTool.cpp:
+        (JSC::SamplingTool::doRun):
+        (JSC::SamplingTool::notifyOfScope):
+        * bytecode/SamplingTool.h:
+        * dfg/DFGThreadData.h:
+        * dfg/DFGWorklist.cpp:
+        (JSC::DFG::Worklist::~Worklist):
+        (JSC::DFG::Worklist::isActiveForVM):
+        (JSC::DFG::Worklist::enqueue):
+        (JSC::DFG::Worklist::compilationState):
+        (JSC::DFG::Worklist::waitUntilAllPlansForVMAreReady):
+        (JSC::DFG::Worklist::removeAllReadyPlansForVM):
+        (JSC::DFG::Worklist::completeAllReadyPlansForVM):
+        (JSC::DFG::Worklist::visitWeakReferences):
+        (JSC::DFG::Worklist::removeDeadPlans):
+        (JSC::DFG::Worklist::queueLength):
+        (JSC::DFG::Worklist::dump):
+        (JSC::DFG::Worklist::runThread):
+        * dfg/DFGWorklist.h:
+        * disassembler/Disassembler.cpp:
+        * heap/CopiedSpace.cpp:
+        (JSC::CopiedSpace::doneFillingBlock):
+        (JSC::CopiedSpace::doneCopying):
+        * heap/CopiedSpace.h:
+        * heap/CopiedSpaceInlines.h:
+        (JSC::CopiedSpace::recycleBorrowedBlock):
+        (JSC::CopiedSpace::allocateBlockForCopyingPhase):
+        * heap/GCThread.cpp:
+        (JSC::GCThread::waitForNextPhase):
+        (JSC::GCThread::gcThreadMain):
+        * heap/GCThreadSharedData.cpp:
+        (JSC::GCThreadSharedData::GCThreadSharedData):
+        (JSC::GCThreadSharedData::~GCThreadSharedData):
+        (JSC::GCThreadSharedData::startNextPhase):
+        (JSC::GCThreadSharedData::endCurrentPhase):
+        (JSC::GCThreadSharedData::didStartMarking):
+        (JSC::GCThreadSharedData::didFinishMarking):
+        * heap/GCThreadSharedData.h:
+        * heap/HeapTimer.h:
+        * heap/MachineStackMarker.cpp:
+        (JSC::ActiveMachineThreadsManager::Locker::Locker):
+        (JSC::ActiveMachineThreadsManager::add):
+        (JSC::ActiveMachineThreadsManager::remove):
+        (JSC::ActiveMachineThreadsManager::ActiveMachineThreadsManager):
+        (JSC::MachineThreads::~MachineThreads):
+        (JSC::MachineThreads::addCurrentThread):
+        (JSC::MachineThreads::removeThreadIfFound):
+        (JSC::MachineThreads::tryCopyOtherThreadStack):
+        (JSC::MachineThreads::tryCopyOtherThreadStacks):
+        (JSC::MachineThreads::gatherConservativeRoots):
+        * heap/MachineStackMarker.h:
+        * heap/SlotVisitor.cpp:
+        (JSC::SlotVisitor::donateKnownParallel):
+        (JSC::SlotVisitor::drain):
+        (JSC::SlotVisitor::drainFromShared):
+        (JSC::SlotVisitor::mergeOpaqueRoots):
+        * heap/SlotVisitorInlines.h:
+        (JSC::SlotVisitor::containsOpaqueRootTriState):
+        * inspector/remote/RemoteInspectorDebuggableConnection.h:
+        * inspector/remote/RemoteInspectorDebuggableConnection.mm:
+        (Inspector::RemoteInspectorHandleRunSourceGlobal):
+        (Inspector::RemoteInspectorQueueTaskOnGlobalQueue):
+        (Inspector::RemoteInspectorInitializeGlobalQueue):
+        (Inspector::RemoteInspectorHandleRunSourceWithInfo):
+        (Inspector::RemoteInspectorDebuggableConnection::setup):
+        (Inspector::RemoteInspectorDebuggableConnection::closeFromDebuggable):
+        (Inspector::RemoteInspectorDebuggableConnection::close):
+        (Inspector::RemoteInspectorDebuggableConnection::sendMessageToBackend):
+        (Inspector::RemoteInspectorDebuggableConnection::queueTaskOnPrivateRunLoop):
+        * interpreter/JSStack.cpp:
+        (JSC::JSStack::JSStack):
+        (JSC::JSStack::releaseExcessCapacity):
+        (JSC::JSStack::addToCommittedByteCount):
+        (JSC::JSStack::committedByteCount):
+        (JSC::stackStatisticsMutex): Deleted.
+        (JSC::JSStack::initializeThreading): Deleted.
+        * interpreter/JSStack.h:
+        (JSC::JSStack::gatherConservativeRoots):
+        (JSC::JSStack::sanitizeStack):
+        (JSC::JSStack::size):
+        (JSC::JSStack::initializeThreading): Deleted.
+        * jit/ExecutableAllocator.cpp:
+        (JSC::DemandExecutableAllocator::DemandExecutableAllocator):
+        (JSC::DemandExecutableAllocator::~DemandExecutableAllocator):
+        (JSC::DemandExecutableAllocator::bytesAllocatedByAllAllocators):
+        (JSC::DemandExecutableAllocator::bytesCommittedByAllocactors):
+        (JSC::DemandExecutableAllocator::dumpProfileFromAllAllocators):
+        (JSC::DemandExecutableAllocator::allocators):
+        (JSC::DemandExecutableAllocator::allocatorsMutex):
+        * jit/JITThunks.cpp:
+        (JSC::JITThunks::ctiStub):
+        * jit/JITThunks.h:
+        * profiler/ProfilerDatabase.cpp:
+        (JSC::Profiler::Database::ensureBytecodesFor):
+        (JSC::Profiler::Database::notifyDestruction):
+        * profiler/ProfilerDatabase.h:
+        * runtime/InitializeThreading.cpp:
+        (JSC::initializeThreading):
+        * runtime/JSLock.cpp:
+        (JSC::GlobalJSLock::GlobalJSLock):
+        (JSC::GlobalJSLock::~GlobalJSLock):
+        (JSC::JSLockHolder::JSLockHolder):
+        (JSC::GlobalJSLock::initialize): Deleted.
+        * runtime/JSLock.h:
+
 2015-08-13  Commit Queue  <commit-queue@webkit.org>
 
         Unreviewed, rolling out r188428.
index 12dbf24..f5bf2b7 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -285,7 +285,7 @@ void SamplingTool::doRun()
 
 #if ENABLE(CODEBLOCK_SAMPLING)
     if (CodeBlock* codeBlock = sample.codeBlock()) {
-        MutexLocker locker(m_scriptSampleMapMutex);
+        LockHolder locker(m_scriptSampleMapMutex);
         ScriptSampleRecord* record = m_scopeSampleMap->get(codeBlock->ownerExecutable());
         ASSERT(record);
         record->sample(codeBlock, sample.vPC());
@@ -301,7 +301,7 @@ void SamplingTool::sample()
 void SamplingTool::notifyOfScope(VM& vm, ScriptExecutable* script)
 {
 #if ENABLE(CODEBLOCK_SAMPLING)
-    MutexLocker locker(m_scriptSampleMapMutex);
+    LockHolder locker(m_scriptSampleMapMutex);
     m_scopeSampleMap->set(script, adoptPtr(new ScriptSampleRecord(vm, script)));
 #else
     UNUSED_PARAM(vm);
index 44714c6..18e3483 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -35,6 +35,7 @@
 #include <wtf/Assertions.h>
 #include <wtf/Atomics.h>
 #include <wtf/HashMap.h>
+#include <wtf/Lock.h>
 #include <wtf/MainThread.h>
 #include <wtf/Spectrum.h>
 #include <wtf/Threading.h>
@@ -338,7 +339,7 @@ namespace JSC {
         unsigned m_opcodeSamplesInCTIFunctions[numOpcodeIDs];
         
 #if ENABLE(CODEBLOCK_SAMPLING)
-        Mutex m_scriptSampleMapMutex;
+        Lock m_scriptSampleMapMutex;
         std::unique_ptr<ScriptSampleRecordMap> m_scopeSampleMap;
 #endif
     };
index e9fc565..d86cf90 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,8 +28,8 @@
 
 #if ENABLE(DFG_JIT)
 
+#include <wtf/Lock.h>
 #include <wtf/Threading.h>
-#include <wtf/ThreadingPrimitives.h>
 
 namespace JSC { namespace DFG {
 
@@ -48,7 +48,7 @@ private:
     
     Worklist* m_worklist;
     ThreadIdentifier m_identifier;
-    Mutex m_rightToRun;
+    Lock m_rightToRun;
     Safepoint* m_safepoint;
 };
 
index 4a8f572..f4bb709 100644
@@ -46,10 +46,10 @@ Worklist::Worklist(CString worklistName)
 Worklist::~Worklist()
 {
     {
-        MutexLocker locker(m_lock);
+        LockHolder locker(m_lock);
         for (unsigned i = m_threads.size(); i--;)
             m_queue.append(nullptr); // Use null plan to indicate that we want the thread to terminate.
-        m_planEnqueued.broadcast();
+        m_planEnqueued.notifyAll();
     }
     for (unsigned i = m_threads.size(); i--;)
         waitForThreadCompletion(m_threads[i]->m_identifier);
@@ -77,7 +77,7 @@ Ref<Worklist> Worklist::create(CString worklistName, unsigned numberOfThreads, i
 
 bool Worklist::isActiveForVM(VM& vm) const
 {
-    MutexLocker locker(m_lock);
+    LockHolder locker(m_lock);
     PlanMap::const_iterator end = m_plans.end();
     for (PlanMap::const_iterator iter = m_plans.begin(); iter != end; ++iter) {
         if (&iter->value->vm == &vm)
@@ -89,7 +89,7 @@ bool Worklist::isActiveForVM(VM& vm) const
 void Worklist::enqueue(PassRefPtr<Plan> passedPlan)
 {
     RefPtr<Plan> plan = passedPlan;
-    MutexLocker locker(m_lock);
+    LockHolder locker(m_lock);
     if (Options::verboseCompilationQueue()) {
         dump(locker, WTF::dataFile());
         dataLog(": Enqueueing plan to optimize ", plan->key(), "\n");
@@ -97,12 +97,12 @@ void Worklist::enqueue(PassRefPtr<Plan> passedPlan)
     ASSERT(m_plans.find(plan->key()) == m_plans.end());
     m_plans.add(plan->key(), plan);
     m_queue.append(plan);
-    m_planEnqueued.signal();
+    m_planEnqueued.notifyOne();
 }
 
 Worklist::State Worklist::compilationState(CompilationKey key)
 {
-    MutexLocker locker(m_lock);
+    LockHolder locker(m_lock);
     PlanMap::iterator iter = m_plans.find(key);
     if (iter == m_plans.end())
         return NotKnown;
@@ -118,7 +118,7 @@ void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
     // After we release this lock, we know that although other VMs may still
     // be adding plans, our VM will not be.
     
-    MutexLocker locker(m_lock);
+    LockHolder locker(m_lock);
     
     if (Options::verboseCompilationQueue()) {
         dump(locker, WTF::dataFile());
@@ -147,7 +147,7 @@ void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
 void Worklist::removeAllReadyPlansForVM(VM& vm, Vector<RefPtr<Plan>, 8>& myReadyPlans)
 {
     DeferGC deferGC(vm.heap);
-    MutexLocker locker(m_lock);
+    LockHolder locker(m_lock);
     for (size_t i = 0; i < m_readyPlans.size(); ++i) {
         RefPtr<Plan> plan = m_readyPlans[i];
         if (&plan->vm != &vm)
@@ -192,7 +192,7 @@ Worklist::State Worklist::completeAllReadyPlansForVM(VM& vm, CompilationKey requ
     }
     
     if (!!requestedKey && resultingState == NotKnown) {
-        MutexLocker locker(m_lock);
+        LockHolder locker(m_lock);
         if (m_plans.contains(requestedKey))
             resultingState = Compiling;
     }
@@ -225,7 +225,7 @@ void Worklist::visitWeakReferences(SlotVisitor& visitor, CodeBlockSet& codeBlock
 {
     VM* vm = visitor.heap()->vm();
     {
-        MutexLocker locker(m_lock);
+        LockHolder locker(m_lock);
         for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
             Plan* plan = iter->value.get();
             if (&plan->vm != vm)
@@ -248,7 +248,7 @@ void Worklist::visitWeakReferences(SlotVisitor& visitor, CodeBlockSet& codeBlock
 void Worklist::removeDeadPlans(VM& vm)
 {
     {
-        MutexLocker locker(m_lock);
+        LockHolder locker(m_lock);
         HashSet<CompilationKey> deadPlanKeys;
         for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) {
             Plan* plan = iter->value.get();
@@ -295,17 +295,17 @@ void Worklist::removeDeadPlans(VM& vm)
 
 size_t Worklist::queueLength()
 {
-    MutexLocker locker(m_lock);
+    LockHolder locker(m_lock);
     return m_queue.size();
 }
 
 void Worklist::dump(PrintStream& out) const
 {
-    MutexLocker locker(m_lock);
+    LockHolder locker(m_lock);
     dump(locker, out);
 }
 
-void Worklist::dump(const MutexLocker&, PrintStream& out) const
+void Worklist::dump(const LockHolder&, PrintStream& out) const
 {
     out.print(
         "Worklist(", RawPointer(this), ")[Queue Length = ", m_queue.size(),
@@ -325,7 +325,7 @@ void Worklist::runThread(ThreadData* data)
     for (;;) {
         RefPtr<Plan> plan;
         {
-            MutexLocker locker(m_lock);
+            LockHolder locker(m_lock);
             while (m_queue.isEmpty())
                 m_planEnqueued.wait(m_lock);
             
@@ -341,9 +341,9 @@ void Worklist::runThread(ThreadData* data)
         }
         
         {
-            MutexLocker locker(data->m_rightToRun);
+            LockHolder locker(data->m_rightToRun);
             {
-                MutexLocker locker(m_lock);
+                LockHolder locker(m_lock);
                 if (plan->stage == Plan::Cancelled) {
                     m_numberOfActiveThreads--;
                     continue;
@@ -359,7 +359,7 @@ void Worklist::runThread(ThreadData* data)
             RELEASE_ASSERT(!plan->vm.heap.isCollecting());
             
             {
-                MutexLocker locker(m_lock);
+                LockHolder locker(m_lock);
                 if (plan->stage == Plan::Cancelled) {
                     m_numberOfActiveThreads--;
                     continue;
@@ -370,7 +370,7 @@ void Worklist::runThread(ThreadData* data)
         }
 
         {
-            MutexLocker locker(m_lock);
+            LockHolder locker(m_lock);
             
             // We could have been cancelled between releasing rightToRun and acquiring m_lock.
             // This would mean that we might be in the middle of GC right now.
@@ -388,7 +388,7 @@ void Worklist::runThread(ThreadData* data)
             
             m_readyPlans.append(plan);
             
-            m_planCompiled.broadcast();
+            m_planCompiled.notifyAll();
             m_numberOfActiveThreads--;
         }
     }
index bd6e6fa..415b26e 100644
 
 #include "DFGPlan.h"
 #include "DFGThreadData.h"
+#include <wtf/Condition.h>
 #include <wtf/Deque.h>
 #include <wtf/HashMap.h>
+#include <wtf/Lock.h>
 #include <wtf/Noncopyable.h>
-#include <wtf/ThreadingPrimitives.h>
 
 namespace JSC {
 
@@ -85,7 +86,7 @@ private:
     
     void removeAllReadyPlansForVM(VM&, Vector<RefPtr<Plan>, 8>&);
 
-    void dump(const MutexLocker&, PrintStream&) const;
+    void dump(const LockHolder&, PrintStream&) const;
     
     CString m_threadName;
     
@@ -103,11 +104,11 @@ private:
     // be completed.
     Vector<RefPtr<Plan>, 16> m_readyPlans;
 
-    Mutex m_suspensionLock;
+    Lock m_suspensionLock;
     
-    mutable Mutex m_lock;
-    ThreadCondition m_planEnqueued;
-    ThreadCondition m_planCompiled;
+    mutable Lock m_lock;
+    Condition m_planEnqueued;
+    Condition m_planCompiled;
     
     Vector<std::unique_ptr<ThreadData>> m_threads;
     unsigned m_numberOfActiveThreads;
index 8f35742..c616682 100644
 #include "Disassembler.h"
 
 #include "MacroAssemblerCodeRef.h"
+#include <wtf/Condition.h>
 #include <wtf/DataLog.h>
 #include <wtf/Deque.h>
+#include <wtf/Lock.h>
 #include <wtf/NeverDestroyed.h>
 #include <wtf/StringPrintStream.h>
 #include <wtf/Threading.h>
-#include <wtf/ThreadingPrimitives.h>
 
 namespace JSC {
 
@@ -78,14 +79,14 @@ public:
     
     void enqueue(std::unique_ptr<DisassemblyTask> task)
     {
-        MutexLocker locker(m_lock);
+        LockHolder locker(m_lock);
         m_queue.append(WTF::move(task));
-        m_condition.broadcast();
+        m_condition.notifyAll();
     }
     
     void waitUntilEmpty()
     {
-        MutexLocker locker(m_lock);
+        LockHolder locker(m_lock);
         while (!m_queue.isEmpty() || m_working)
             m_condition.wait(m_lock);
     }
@@ -96,9 +97,9 @@ private:
         for (;;) {
             std::unique_ptr<DisassemblyTask> task;
             {
-                MutexLocker locker(m_lock);
+                LockHolder locker(m_lock);
                 m_working = false;
-                m_condition.broadcast();
+                m_condition.notifyAll();
                 while (m_queue.isEmpty())
                     m_condition.wait(m_lock);
                 task = m_queue.takeFirst();
@@ -112,8 +113,8 @@ private:
         }
     }
     
-    Mutex m_lock;
-    ThreadCondition m_condition;
+    Lock m_lock;
+    Condition m_condition;
     Deque<std::unique_ptr<DisassemblyTask>> m_queue;
     bool m_working { false };
 };
index 3f265e7..a387499 100644
@@ -198,12 +198,12 @@ void CopiedSpace::doneFillingBlock(CopiedBlock* block, CopiedBlock** exchange)
     }
 
     {
-        MutexLocker locker(m_loanedBlocksLock);
+        LockHolder locker(m_loanedBlocksLock);
         ASSERT(m_numberOfLoanedBlocks > 0);
         ASSERT(m_inCopyingPhase);
         m_numberOfLoanedBlocks--;
         if (!m_numberOfLoanedBlocks)
-            m_loanedBlocksCondition.signal();
+            m_loanedBlocksCondition.notifyOne();
     }
 }
 
@@ -231,7 +231,7 @@ void CopiedSpace::didStartFullCollection()
 void CopiedSpace::doneCopying()
 {
     {
-        MutexLocker locker(m_loanedBlocksLock);
+        LockHolder locker(m_loanedBlocksLock);
         while (m_numberOfLoanedBlocks > 0)
             m_loanedBlocksCondition.wait(m_loanedBlocksLock);
     }
index d18d1fc..ed2982d 100644
 #include "TinyBloomFilter.h"
 #include <wtf/Assertions.h>
 #include <wtf/CheckedBoolean.h>
+#include <wtf/Condition.h>
 #include <wtf/DoublyLinkedList.h>
 #include <wtf/HashSet.h>
 #include <wtf/Lock.h>
 #include <wtf/OSAllocator.h>
 #include <wtf/PageBlock.h>
 #include <wtf/StdLibExtras.h>
-#include <wtf/ThreadingPrimitives.h>
 
 namespace JSC {
 
@@ -138,8 +138,8 @@ private:
     bool m_inCopyingPhase;
     bool m_shouldDoCopyPhase;
 
-    Mutex m_loanedBlocksLock; 
-    ThreadCondition m_loanedBlocksCondition;
+    Lock m_loanedBlocksLock; 
+    Condition m_loanedBlocksCondition;
     size_t m_numberOfLoanedBlocks;
     
     size_t m_bytesRemovedFromOldSpaceDueToReallocation;
index 8f4058b..aa4eeff 100644
@@ -113,12 +113,12 @@ inline void CopiedSpace::recycleBorrowedBlock(CopiedBlock* block)
     CopiedBlock::destroy(block);
 
     {
-        MutexLocker locker(m_loanedBlocksLock);
+        LockHolder locker(m_loanedBlocksLock);
         ASSERT(m_numberOfLoanedBlocks > 0);
         ASSERT(m_inCopyingPhase);
         m_numberOfLoanedBlocks--;
         if (!m_numberOfLoanedBlocks)
-            m_loanedBlocksCondition.signal();
+            m_loanedBlocksCondition.notifyOne();
     }
 }
 
@@ -128,7 +128,7 @@ inline CopiedBlock* CopiedSpace::allocateBlockForCopyingPhase()
     CopiedBlock* block = CopiedBlock::createNoZeroFill();
 
     {
-        MutexLocker locker(m_loanedBlocksLock);
+        LockHolder locker(m_loanedBlocksLock);
         m_numberOfLoanedBlocks++;
     }
 
index 3a3dcea..bf562b5 100644
@@ -69,12 +69,12 @@ CopyVisitor* GCThread::copyVisitor()
 
 GCPhase GCThread::waitForNextPhase()
 {
-    std::unique_lock<std::mutex> lock(m_shared.m_phaseMutex);
+    std::unique_lock<Lock> lock(m_shared.m_phaseMutex);
     m_shared.m_phaseConditionVariable.wait(lock, [this] { return !m_shared.m_gcThreadsShouldWait; });
 
     m_shared.m_numberOfActiveGCThreads--;
     if (!m_shared.m_numberOfActiveGCThreads)
-        m_shared.m_activityConditionVariable.notify_one();
+        m_shared.m_activityConditionVariable.notifyOne();
 
     m_shared.m_phaseConditionVariable.wait(lock, [this] { return m_shared.m_currentPhase != NoPhase; });
     m_shared.m_numberOfActiveGCThreads++;
@@ -90,7 +90,7 @@ void GCThread::gcThreadMain()
     // Wait for the main thread to finish creating and initializing us. The main thread grabs this lock before 
     // creating this thread. We aren't guaranteed to have a valid threadID until the main thread releases this lock.
     {
-        std::lock_guard<std::mutex> lock(m_shared.m_phaseMutex);
+        std::lock_guard<Lock> lock(m_shared.m_phaseMutex);
     }
     {
         ParallelModeEnabler enabler(*m_slotVisitor);
index 824b1ac..3fad873 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2011, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -83,7 +83,7 @@ GCThreadSharedData::GCThreadSharedData(VM* vm)
 {
 #if ENABLE(PARALLEL_GC)
     // Grab the lock so the new GC threads can be properly initialized before they start running.
-    std::unique_lock<std::mutex> lock(m_phaseMutex);
+    std::unique_lock<Lock> lock(m_phaseMutex);
     for (unsigned i = 1; i < Options::numberOfGCMarkers(); ++i) {
         m_numberOfActiveGCThreads++;
         GCThread* newThread = new GCThread(*this, std::make_unique<SlotVisitor>(*this), std::make_unique<CopyVisitor>(*this));
@@ -102,13 +102,13 @@ GCThreadSharedData::~GCThreadSharedData()
 #if ENABLE(PARALLEL_GC)    
     // Destroy our marking threads.
     {
-        std::lock_guard<std::mutex> markingLock(m_markingMutex);
-        std::lock_guard<std::mutex> phaseLock(m_phaseMutex);
+        std::lock_guard<Lock> markingLock(m_markingMutex);
+        std::lock_guard<Lock> phaseLock(m_phaseMutex);
         ASSERT(m_currentPhase == NoPhase);
         m_parallelMarkersShouldExit = true;
         m_gcThreadsShouldWait = false;
         m_currentPhase = Exit;
-        m_phaseConditionVariable.notify_all();
+        m_phaseConditionVariable.notifyAll();
     }
     for (unsigned i = 0; i < m_gcThreads.size(); ++i) {
         waitForThreadCompletion(m_gcThreads[i]->threadID());
@@ -131,21 +131,21 @@ void GCThreadSharedData::reset()
 
 void GCThreadSharedData::startNextPhase(GCPhase phase)
 {
-    std::lock_guard<std::mutex> lock(m_phaseMutex);
+    std::lock_guard<Lock> lock(m_phaseMutex);
     ASSERT(!m_gcThreadsShouldWait);
     ASSERT(m_currentPhase == NoPhase);
     m_gcThreadsShouldWait = true;
     m_currentPhase = phase;
-    m_phaseConditionVariable.notify_all();
+    m_phaseConditionVariable.notifyAll();
 }
 
 void GCThreadSharedData::endCurrentPhase()
 {
     ASSERT(m_gcThreadsShouldWait);
-    std::unique_lock<std::mutex> lock(m_phaseMutex);
+    std::unique_lock<Lock> lock(m_phaseMutex);
     m_currentPhase = NoPhase;
     m_gcThreadsShouldWait = false;
-    m_phaseConditionVariable.notify_all();
+    m_phaseConditionVariable.notifyAll();
     m_activityConditionVariable.wait(lock, [this] { return !m_numberOfActiveGCThreads; });
 }
 
@@ -158,7 +158,7 @@ void GCThreadSharedData::didStartMarking()
         ASSERT(m_opaqueRoots.isEmpty());
 #endif
     }
-    std::lock_guard<std::mutex> lock(m_markingMutex);
+    std::lock_guard<Lock> lock(m_markingMutex);
     m_parallelMarkersShouldExit = false;
     startNextPhase(Mark);
 }
@@ -166,9 +166,9 @@ void GCThreadSharedData::didStartMarking()
 void GCThreadSharedData::didFinishMarking()
 {
     {
-        std::lock_guard<std::mutex> lock(m_markingMutex);
+        std::lock_guard<Lock> lock(m_markingMutex);
         m_parallelMarkersShouldExit = true;
-        m_markingConditionVariable.notify_all();
+        m_markingConditionVariable.notifyAll();
     }
 
     ASSERT(m_currentPhase == Mark);
index 0c5ad1d..ae0cdfb 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2011, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -32,6 +32,7 @@
 #include "UnconditionalFinalizer.h"
 #include "WeakReferenceHarvester.h"
 #include <condition_variable>
+#include <wtf/Condition.h>
 #include <wtf/HashSet.h>
 #include <wtf/Lock.h>
 #include <wtf/Vector.h>
@@ -88,13 +89,13 @@ private:
 
     Vector<GCThread*> m_gcThreads;
 
-    std::mutex m_markingMutex;
-    std::condition_variable m_markingConditionVariable;
+    Lock m_markingMutex;
+    Condition m_markingConditionVariable;
     MarkStackArray m_sharedMarkStack;
     unsigned m_numberOfActiveParallelMarkers;
     bool m_parallelMarkersShouldExit;
 
-    std::mutex m_opaqueRootsMutex;
+    Lock m_opaqueRootsMutex;
     HashSet<void*> m_opaqueRoots;
 
     Lock m_copyLock;
@@ -102,9 +103,9 @@ private:
     size_t m_copyIndex;
     static const size_t s_blockFragmentLength = 32;
 
-    std::mutex m_phaseMutex;
-    std::condition_variable m_phaseConditionVariable;
-    std::condition_variable m_activityConditionVariable;
+    Lock m_phaseMutex;
+    Condition m_phaseConditionVariable;
+    Condition m_activityConditionVariable;
     unsigned m_numberOfActiveGCThreads;
     bool m_gcThreadsShouldWait;
     GCPhase m_currentPhase;
index 1efbb9a..bbce5f9 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,6 +26,7 @@
 #ifndef HeapTimer_h
 #define HeapTimer_h
 
+#include <wtf/Lock.h>
 #include <wtf/RetainPtr.h>
 #include <wtf/Threading.h>
 
@@ -59,7 +60,7 @@ protected:
     RetainPtr<CFRunLoopRef> m_runLoop;
     CFRunLoopTimerContext m_context;
 
-    Mutex m_shutdownMutex;
+    Lock m_shutdownMutex;
 #elif PLATFORM(EFL)
     static bool timerEvent(void*);
     Ecore_Timer* add(double delay, void* agent);
index 1a8e8b1..5156c54 100644
@@ -103,18 +103,18 @@ public:
         }
 
     private:
-        MutexLocker m_locker;
+        LockHolder m_locker;
     };
 
     void add(MachineThreads* machineThreads)
     {
-        MutexLocker managerLock(m_lock);
+        LockHolder managerLock(m_lock);
         m_set.add(machineThreads);
     }
 
     void remove(MachineThreads* machineThreads)
     {
-        MutexLocker managerLock(m_lock);
+        LockHolder managerLock(m_lock);
         auto recordedMachineThreads = m_set.take(machineThreads);
         RELEASE_ASSERT(recordedMachineThreads = machineThreads);
     }
@@ -129,7 +129,7 @@ private:
 
     ActiveMachineThreadsManager() { }
     
-    Mutex m_lock;
+    Lock m_lock;
     MachineThreadsSet m_set;
 
     friend ActiveMachineThreadsManager& activeMachineThreadsManager();
@@ -263,7 +263,7 @@ MachineThreads::~MachineThreads()
     activeMachineThreadsManager().remove(this);
     threadSpecificKeyDelete(m_threadSpecific);
 
-    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
+    LockHolder registeredThreadsLock(m_registeredThreadsMutex);
     for (Thread* t = m_registeredThreads; t;) {
         Thread* next = t->next;
         delete t;
@@ -294,7 +294,7 @@ void MachineThreads::addCurrentThread()
     threadSpecificSet(m_threadSpecific, this);
     Thread* thread = Thread::createForCurrentThread();
 
-    MutexLocker lock(m_registeredThreadsMutex);
+    LockHolder lock(m_registeredThreadsMutex);
 
     thread->next = m_registeredThreads;
     m_registeredThreads = thread;
@@ -318,7 +318,7 @@ void MachineThreads::removeThread(void* p)
 template<typename PlatformThread>
 void MachineThreads::removeThreadIfFound(PlatformThread platformThread)
 {
-    MutexLocker lock(m_registeredThreadsMutex);
+    LockHolder lock(m_registeredThreadsMutex);
     Thread* t = m_registeredThreads;
     if (*t == platformThread) {
         m_registeredThreads = m_registeredThreads->next;
@@ -566,7 +566,7 @@ void MachineThreads::tryCopyOtherThreadStack(Thread* thread, void* buffer, size_
     thread->freeRegisters(registers);
 }
 
-bool MachineThreads::tryCopyOtherThreadStacks(MutexLocker&, void* buffer, size_t capacity, size_t* size)
+bool MachineThreads::tryCopyOtherThreadStacks(LockHolder&, void* buffer, size_t capacity, size_t* size)
 {
     // Prevent two VMs from suspending each other's threads at the same time,
     // which can cause deadlock: <rdar://problem/20300842>.
@@ -660,7 +660,7 @@ void MachineThreads::gatherConservativeRoots(ConservativeRoots& conservativeRoot
     size_t size;
     size_t capacity = 0;
     void* buffer = nullptr;
-    MutexLocker lock(m_registeredThreadsMutex);
+    LockHolder lock(m_registeredThreadsMutex);
     while (!tryCopyOtherThreadStacks(lock, buffer, capacity, &size))
         growBuffer(size, &buffer, &capacity);
 
index 0723ad8..3080d2b 100644
@@ -1,7 +1,7 @@
 /*
  *  Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
  *  Copyright (C) 2001 Peter Kelly (pmk@post.com)
- *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2015 Apple Inc. All rights reserved.
  *
  *  This library is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU Lesser General Public
@@ -23,9 +23,9 @@
 #define MachineThreads_h
 
 #include <setjmp.h>
+#include <wtf/Lock.h>
 #include <wtf/Noncopyable.h>
 #include <wtf/ThreadSpecific.h>
-#include <wtf/ThreadingPrimitives.h>
 
 namespace JSC {
 
@@ -52,14 +52,14 @@ namespace JSC {
         void gatherFromCurrentThread(ConservativeRoots&, JITStubRoutineSet&, CodeBlockSet&, void* stackOrigin, void* stackTop, RegisterState& calleeSavedRegisters);
 
         void tryCopyOtherThreadStack(Thread*, void*, size_t capacity, size_t*);
-        bool tryCopyOtherThreadStacks(MutexLocker&, void*, size_t capacity, size_t*);
+        bool tryCopyOtherThreadStacks(LockHolder&, void*, size_t capacity, size_t*);
 
         static void removeThread(void*);
 
         template<typename PlatformThread>
         void removeThreadIfFound(PlatformThread);
 
-        Mutex m_registeredThreadsMutex;
+        Lock m_registeredThreadsMutex;
         Thread* m_registeredThreads;
         WTF::ThreadSpecificKey m_threadSpecific;
 #if !ASSERT_DISABLED
index 4de4966..a225386 100644
@@ -1,3 +1,28 @@
+/*
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
 #include "config.h"
 #include "SlotVisitor.h"
 #include "SlotVisitorInlines.h"
@@ -12,6 +37,7 @@
 #include "JSObject.h"
 #include "JSString.h"
 #include "JSCInlines.h"
+#include <wtf/Lock.h>
 #include <wtf/StackStats.h>
 
 namespace JSC {
@@ -121,7 +147,7 @@ void SlotVisitor::donateKnownParallel()
 
     // If we're contending on the lock, be conservative and assume that another
     // thread is already donating.
-    std::unique_lock<std::mutex> lock(m_shared.m_markingMutex, std::try_to_lock);
+    std::unique_lock<Lock> lock(m_shared.m_markingMutex, std::try_to_lock);
     if (!lock.owns_lock())
         return;
 
@@ -129,7 +155,7 @@ void SlotVisitor::donateKnownParallel()
     m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);
 
     if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers())
-        m_shared.m_markingConditionVariable.notify_all();
+        m_shared.m_markingConditionVariable.notifyAll();
 }
 
 void SlotVisitor::drain()
@@ -184,12 +210,12 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
     
 #if ENABLE(PARALLEL_GC)
     {
-        std::lock_guard<std::mutex> lock(m_shared.m_markingMutex);
+        std::lock_guard<Lock> lock(m_shared.m_markingMutex);
         m_shared.m_numberOfActiveParallelMarkers++;
     }
     while (true) {
         {
-            std::unique_lock<std::mutex> lock(m_shared.m_markingMutex);
+            std::unique_lock<Lock> lock(m_shared.m_markingMutex);
             m_shared.m_numberOfActiveParallelMarkers--;
 
             // How we wait differs depending on drain mode.
@@ -200,7 +226,7 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
                     // Did we reach termination?
                     if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
                         // Let any sleeping slaves know it's time for them to return;
-                        m_shared.m_markingConditionVariable.notify_all();
+                        m_shared.m_markingConditionVariable.notifyAll();
                         return;
                     }
                     
@@ -216,7 +242,7 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
                 
                 // Did we detect termination? If so, let the master know.
                 if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
-                    m_shared.m_markingConditionVariable.notify_all();
+                    m_shared.m_markingConditionVariable.notifyAll();
 
                 m_shared.m_markingConditionVariable.wait(lock, [this] { return !m_shared.m_sharedMarkStack.isEmpty() || m_shared.m_parallelMarkersShouldExit; });
                 
@@ -240,7 +266,7 @@ void SlotVisitor::mergeOpaqueRoots()
     StackStats::probe();
     ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
     {
-        std::lock_guard<std::mutex> lock(m_shared.m_opaqueRootsMutex);
+        std::lock_guard<Lock> lock(m_shared.m_opaqueRootsMutex);
         for (auto* root : m_opaqueRoots)
             m_shared.m_opaqueRoots.add(root);
     }
index 548b99d..3338b1c 100644
@@ -188,7 +188,7 @@ inline TriState SlotVisitor::containsOpaqueRootTriState(void* root) const
 {
     if (m_opaqueRoots.contains(root))
         return TrueTriState;
-    std::lock_guard<std::mutex> lock(m_shared.m_opaqueRootsMutex);
+    std::lock_guard<Lock> lock(m_shared.m_opaqueRootsMutex);
     if (m_shared.m_opaqueRoots.contains(root))
         return TrueTriState;
     return MixedTriState;
index cedb7ab..e5e5d4f 100644
@@ -31,6 +31,7 @@
 #import "InspectorFrontendChannel.h"
 #import "RemoteInspectorDebuggable.h"
 #import <mutex>
+#import <wtf/Lock.h>
 #import <wtf/RetainPtr.h>
 #import <wtf/ThreadSafeRefCounted.h>
 
@@ -91,7 +92,7 @@ public:
     void sendMessageToBackend(NSString *);
     virtual bool sendMessageToFrontend(const String&) override;
 
-    std::mutex& queueMutex() { return m_queueMutex; }
+    Lock& queueMutex() { return m_queueMutex; }
     RemoteInspectorQueue queue() const { return m_queue; }
     void clearQueue() { m_queue.clear(); }
 
@@ -105,14 +106,14 @@ private:
     // This connection from the RemoteInspector singleton to the Debuggable
     // can be used on multiple threads. So any access to the debuggable
     // itself must take this mutex to ensure m_debuggable is valid.
-    std::mutex m_debuggableMutex;
+    Lock m_debuggableMutex;
 
     // If a debuggable has a specific run loop it wants to evaluate on
     // we setup our run loop sources on that specific run loop.
     RetainPtr<CFRunLoopRef> m_runLoop;
     RetainPtr<CFRunLoopSourceRef> m_runLoopSource;
     RemoteInspectorQueue m_queue;
-    std::mutex m_queueMutex;
+    Lock m_queueMutex;
 
     RemoteInspectorDebuggable* m_debuggable;
     RetainPtr<NSString> m_connectionIdentifier;
index 0821266..bedaad7 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 Apple Inc. All Rights Reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All Rights Reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 
 namespace Inspector {
 
-static std::mutex* rwiQueueMutex;
+static StaticLock rwiQueueMutex;
 static CFRunLoopSourceRef rwiRunLoopSource;
 static RemoteInspectorQueue* rwiQueue;
 
 static void RemoteInspectorHandleRunSourceGlobal(void*)
 {
     ASSERT(CFRunLoopGetCurrent() == CFRunLoopGetMain());
-    ASSERT(rwiQueueMutex);
     ASSERT(rwiRunLoopSource);
     ASSERT(rwiQueue);
 
     RemoteInspectorQueue queueCopy;
     {
-        std::lock_guard<std::mutex> lock(*rwiQueueMutex);
+        std::lock_guard<StaticLock> lock(rwiQueueMutex);
         queueCopy = *rwiQueue;
         rwiQueue->clear();
     }
@@ -63,12 +62,11 @@ static void RemoteInspectorHandleRunSourceGlobal(void*)
 
 static void RemoteInspectorQueueTaskOnGlobalQueue(void (^task)())
 {
-    ASSERT(rwiQueueMutex);
     ASSERT(rwiRunLoopSource);
     ASSERT(rwiQueue);
 
     {
-        std::lock_guard<std::mutex> lock(*rwiQueueMutex);
+        std::lock_guard<StaticLock> lock(rwiQueueMutex);
         rwiQueue->append(RemoteInspectorBlock(task));
     }
 
@@ -81,7 +79,6 @@ static void RemoteInspectorInitializeGlobalQueue()
     static dispatch_once_t pred;
     dispatch_once(&pred, ^{
         rwiQueue = new RemoteInspectorQueue;
-        rwiQueueMutex = std::make_unique<std::mutex>().release();
 
         CFRunLoopSourceContext runLoopSourceContext = {0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, RemoteInspectorHandleRunSourceGlobal};
         rwiRunLoopSource = CFRunLoopSourceCreate(kCFAllocatorDefault, 1, &runLoopSourceContext);
@@ -98,7 +95,7 @@ static void RemoteInspectorHandleRunSourceWithInfo(void* info)
 
     RemoteInspectorQueue queueCopy;
     {
-        std::lock_guard<std::mutex> lock(debuggableConnection->queueMutex());
+        std::lock_guard<Lock> lock(debuggableConnection->queueMutex());
         queueCopy = debuggableConnection->queue();
         debuggableConnection->clearQueue();
     }
@@ -152,7 +149,7 @@ void RemoteInspectorDebuggableConnection::dispatchAsyncOnDebuggable(void (^block
 
 bool RemoteInspectorDebuggableConnection::setup(bool isAutomaticInspection, bool automaticallyPause)
 {
-    std::lock_guard<std::mutex> lock(m_debuggableMutex);
+    std::lock_guard<Lock> lock(m_debuggableMutex);
 
     if (!m_debuggable)
         return false;
@@ -160,7 +157,7 @@ bool RemoteInspectorDebuggableConnection::setup(bool isAutomaticInspection, bool
     ref();
     dispatchAsyncOnDebuggable(^{
         {
-            std::lock_guard<std::mutex> lock(m_debuggableMutex);
+            std::lock_guard<Lock> lock(m_debuggableMutex);
             if (!m_debuggable || !m_debuggable->remoteDebuggingAllowed() || m_debuggable->hasLocalDebugger()) {
                 RemoteInspector::singleton().setupFailed(identifier());
                 m_debuggable = nullptr;
@@ -180,7 +177,7 @@ bool RemoteInspectorDebuggableConnection::setup(bool isAutomaticInspection, bool
 
 void RemoteInspectorDebuggableConnection::closeFromDebuggable()
 {
-    std::lock_guard<std::mutex> lock(m_debuggableMutex);
+    std::lock_guard<Lock> lock(m_debuggableMutex);
 
     m_debuggable = nullptr;
 }
@@ -190,7 +187,7 @@ void RemoteInspectorDebuggableConnection::close()
     ref();
     dispatchAsyncOnDebuggable(^{
         {
-            std::lock_guard<std::mutex> lock(m_debuggableMutex);
+            std::lock_guard<Lock> lock(m_debuggableMutex);
 
             if (m_debuggable) {
                 if (m_connected)
@@ -210,7 +207,7 @@ void RemoteInspectorDebuggableConnection::sendMessageToBackend(NSString *message
         {
             RemoteInspectorDebuggable* debuggable = nullptr;
             {
-                std::lock_guard<std::mutex> lock(m_debuggableMutex);
+                std::lock_guard<Lock> lock(m_debuggableMutex);
                 if (!m_debuggable)
                     return;
                 debuggable = m_debuggable;
@@ -263,7 +260,7 @@ void RemoteInspectorDebuggableConnection::queueTaskOnPrivateRunLoop(void (^block
     ASSERT(m_runLoop);
 
     {
-        std::lock_guard<std::mutex> lock(m_queueMutex);
+        std::lock_guard<Lock> lock(m_queueMutex);
         m_queue.append(RemoteInspectorBlock(block));
     }
 
index 025fef0..d755aaa 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013, 2014, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 #include "Interpreter.h"
 #include "JSCInlines.h"
 #include "Options.h"
+#include <wtf/Lock.h>
 
 namespace JSC {
 
 #if !ENABLE(JIT)
 static size_t committedBytesCount = 0;
 
-static Mutex& stackStatisticsMutex()
-{
-    DEPRECATED_DEFINE_STATIC_LOCAL(Mutex, staticMutex, ());
-    return staticMutex;
-}    
+static StaticLock stackStatisticsMutex;
 #endif // !ENABLE(JIT)
 
 JSStack::JSStack(VM& vm)
@@ -139,14 +136,9 @@ void JSStack::releaseExcessCapacity()
     m_commitTop = highAddressWithReservedZone;
 }
 
-void JSStack::initializeThreading()
-{
-    stackStatisticsMutex();
-}
-
 void JSStack::addToCommittedByteCount(long byteCount)
 {
-    MutexLocker locker(stackStatisticsMutex());
+    LockHolder locker(stackStatisticsMutex);
     ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1);
     committedBytesCount += byteCount;
 }
@@ -176,7 +168,7 @@ Register* JSStack::highAddress() const
 size_t JSStack::committedByteCount()
 {
 #if !ENABLE(JIT)
-    MutexLocker locker(stackStatisticsMutex());
+    LockHolder locker(stackStatisticsMutex);
     return committedBytesCount;
 #else
     // When using the C stack, we don't know how many stack pages are actually
index 401fbc0..07cbbe6 100644
@@ -82,7 +82,6 @@ namespace JSC {
         void gatherConservativeRoots(ConservativeRoots&) { }
         void gatherConservativeRoots(ConservativeRoots&, JITStubRoutineSet&, CodeBlockSet&) { }
         void sanitizeStack() { }
-        static void initializeThreading() { }
 #else
         ~JSStack();
 
@@ -97,8 +96,6 @@ namespace JSC {
 
         size_t size() const { return highAddress() - lowAddress(); }
 
-        static void initializeThreading();
-
         void setReservedZoneSize(size_t);
 
         inline Register* topOfStack();
index bb49e73..d2911a3 100644
 #if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
 #include "CodeProfiling.h"
 #include <wtf/HashSet.h>
+#include <wtf/Lock.h>
 #include <wtf/MetaAllocator.h>
 #include <wtf/NeverDestroyed.h>
 #include <wtf/PageReservation.h>
-#include <wtf/ThreadingPrimitives.h>
 #include <wtf/VMTags.h>
 #endif
 
@@ -56,7 +56,7 @@ public:
     DemandExecutableAllocator()
         : MetaAllocator(jitAllocationGranule)
     {
-        std::lock_guard<std::mutex> lock(allocatorsMutex());
+        std::lock_guard<StaticLock> lock(allocatorsMutex());
         allocators().add(this);
         // Don't preallocate any memory here.
     }
@@ -64,7 +64,7 @@ public:
     virtual ~DemandExecutableAllocator()
     {
         {
-            std::lock_guard<std::mutex> lock(allocatorsMutex());
+            std::lock_guard<StaticLock> lock(allocatorsMutex());
             allocators().remove(this);
         }
         for (unsigned i = 0; i < reservations.size(); ++i)
@@ -74,7 +74,7 @@ public:
     static size_t bytesAllocatedByAllAllocators()
     {
         size_t total = 0;
-        std::lock_guard<std::mutex> lock(allocatorsMutex());
+        std::lock_guard<StaticLock> lock(allocatorsMutex());
         for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
             total += (*allocator)->bytesAllocated();
         return total;
@@ -83,7 +83,7 @@ public:
     static size_t bytesCommittedByAllocactors()
     {
         size_t total = 0;
-        std::lock_guard<std::mutex> lock(allocatorsMutex());
+        std::lock_guard<StaticLock> lock(allocatorsMutex());
         for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
             total += (*allocator)->bytesCommitted();
         return total;
@@ -92,7 +92,7 @@ public:
 #if ENABLE(META_ALLOCATOR_PROFILE)
     static void dumpProfileFromAllAllocators()
     {
-        std::lock_guard<std::mutex> lock(allocatorsMutex());
+        std::lock_guard<StaticLock> lock(allocatorsMutex());
         for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
             (*allocator)->dumpProfile();
     }
@@ -138,9 +138,9 @@ private:
         return sAllocators;
     }
 
-    static std::mutex& allocatorsMutex()
+    static StaticLock& allocatorsMutex()
     {
-        static NeverDestroyed<std::mutex> mutex;
+        static StaticLock mutex;
 
         return mutex;
     }
index 0a50a90..fa61044 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -66,7 +66,7 @@ MacroAssemblerCodePtr JITThunks::ctiNativeTailCall(VM* vm)
 
 MacroAssemblerCodeRef JITThunks::ctiStub(VM* vm, ThunkGenerator generator)
 {
-    Locker locker(m_lock);
+    LockHolder locker(m_lock);
     CTIStubMap::AddResult entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef());
     if (entry.isNewEntry) {
         // Compilation thread can only retrieve existing entries.
index 64a06b5..2e02883 100644
@@ -63,10 +63,6 @@ public:
     void clearHostFunctionStubs();
 
 private:
-    // Main thread can hold this lock for a while, so use an adaptive mutex.
-    typedef Mutex Lock;
-    typedef MutexLocker Locker;
-
     void finalize(Handle<Unknown>, void* context) override;
     
     typedef HashMap<ThunkGenerator, MacroAssemblerCodeRef> CTIStubMap;
index d78305a..fc952c0 100644
@@ -57,7 +57,7 @@ Database::~Database()
 
 Bytecodes* Database::ensureBytecodesFor(CodeBlock* codeBlock)
 {
-    Locker locker(m_lock);
+    LockHolder locker(m_lock);
     
     codeBlock = codeBlock->baselineVersion();
     
@@ -75,7 +75,7 @@ Bytecodes* Database::ensureBytecodesFor(CodeBlock* codeBlock)
 
 void Database::notifyDestruction(CodeBlock* codeBlock)
 {
-    Locker locker(m_lock);
+    LockHolder locker(m_lock);
     
     m_bytecodesMap.remove(codeBlock);
 }
index 7d4f3cf..9bb64cf 100644
@@ -32,6 +32,7 @@
 #include "ProfilerCompilationKind.h"
 #include <wtf/FastMalloc.h>
 #include <wtf/HashMap.h>
+#include <wtf/Lock.h>
 #include <wtf/Noncopyable.h>
 #include <wtf/PassRefPtr.h>
 #include <wtf/SegmentedVector.h>
@@ -70,21 +71,6 @@ public:
     void registerToSaveAtExit(const char* filename);
     
 private:
-    // Use a full-blown adaptive mutex because:
-    // - There is only one ProfilerDatabase per VM. The size overhead of the system's
-    //   mutex is negligible if you only have one of them.
-    // - It's locked infrequently - once per bytecode generation, compilation, and
-    //   code block collection - so the fact that the fast path still requires a
-    //   function call is neglible.
-    // - It tends to be held for a while. Currently, we hold it while generating
-    //   Profiler::Bytecodes for a CodeBlock. That's uncommon and shouldn't affect
-    //   performance, but if we did have contention, we would want a sensible,
-    //   power-aware backoff. An adaptive mutex will do this as a matter of course,
-    //   but a spinlock won't.
-    typedef Mutex Lock;
-    typedef MutexLocker Locker;
-    
-
     void addDatabaseToAtExit();
     void removeDatabaseFromAtExit();
     void performAtExitSave() const;
index 2d7adbd..d74460f 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -56,7 +56,6 @@ void initializeThreading()
     std::call_once(initializeThreadingOnceFlag, []{
         WTF::double_conversion::initialize();
         WTF::initializeThreading();
-        GlobalJSLock::initialize();
         Options::initialize();
         if (Options::recordGCPauseTimes())
             HeapStatistics::initialize();
@@ -66,7 +65,6 @@ void initializeThreading()
 #if ENABLE(ASSEMBLER)
         ExecutableAllocator::initializeAllocator();
 #endif
-        JSStack::initializeThreading();
         LLInt::initialize();
 #ifndef NDEBUG
         DisallowGC::initialize();
index 1526d09..828df16 100644
 
 namespace JSC {
 
-std::mutex* GlobalJSLock::s_sharedInstanceMutex;
+StaticLock GlobalJSLock::s_sharedInstanceMutex;
 
 GlobalJSLock::GlobalJSLock()
 {
-    s_sharedInstanceMutex->lock();
+    s_sharedInstanceMutex.lock();
 }
 
 GlobalJSLock::~GlobalJSLock()
 {
-    s_sharedInstanceMutex->unlock();
-}
-
-void GlobalJSLock::initialize()
-{
-    s_sharedInstanceMutex = new std::mutex();
+    s_sharedInstanceMutex.unlock();
 }
 
 JSLockHolder::JSLockHolder(ExecState* exec)
index c4339c0..4d0d174 100644
@@ -24,6 +24,7 @@
 #include <mutex>
 #include <thread>
 #include <wtf/Assertions.h>
+#include <wtf/Lock.h>
 #include <wtf/Noncopyable.h>
 #include <wtf/RefPtr.h>
 #include <wtf/ThreadSafeRefCounted.h>
@@ -60,10 +61,8 @@ class GlobalJSLock {
 public:
     JS_EXPORT_PRIVATE GlobalJSLock();
     JS_EXPORT_PRIVATE ~GlobalJSLock();
-
-    static void initialize();
 private:
-    static std::mutex* s_sharedInstanceMutex;
+    static StaticLock s_sharedInstanceMutex;
 };
 
 class JSLockHolder {
@@ -133,7 +132,7 @@ private:
     unsigned dropAllLocks(DropAllLocks*);
     void grabAllLocks(DropAllLocks*, unsigned lockCount);
 
-    std::mutex m_lock;
+    Lock m_lock;
     std::thread::id m_ownerThreadID;
     intptr_t m_lockCount;
     unsigned m_lockDropDepth;
index 221dee2..6d43b5b 100644
@@ -1,3 +1,17 @@
+2015-08-13  Filip Pizlo  <fpizlo@apple.com>
+
+        Use WTF::Lock and WTF::Condition instead of WTF::Mutex, WTF::ThreadCondition, std::mutex, and std::condition_variable
+        https://bugs.webkit.org/show_bug.cgi?id=147999
+
+        Reviewed by Geoffrey Garen.
+
+        * wtf/Condition.h: "using WTF::Condition".
+        * wtf/Lock.h:
+        (WTF::LockBase::lock):
+        (WTF::LockBase::tryLock): Add tryLock() because it turns out that we use it sometimes.
+        (WTF::LockBase::try_lock): unique_lock needs this.
+        (WTF::LockBase::unlock):
+
 2015-08-13  Commit Queue  <commit-queue@webkit.org>
 
         Unreviewed, rolling out r188428.
index b155ef9..bd841fb 100644
@@ -109,5 +109,7 @@ private:
 
 } // namespace WTF
 
+using WTF::Condition;
+
 #endif // WTF_Condition_h
 
index c9a1129..3b57894 100644
@@ -56,6 +56,23 @@ struct LockBase {
         lockSlow();
     }
 
+    bool tryLock()
+    {
+        for (;;) {
+            uint8_t currentByteValue = m_byte.load();
+            if (currentByteValue & isHeldBit)
+                return false;
+            if (m_byte.compareExchangeWeak(currentByteValue, currentByteValue | isHeldBit))
+                return true;
+        }
+    }
+
+    // Need this version for std::unique_lock.
+    bool try_lock()
+    {
+        return tryLock();
+    }
+
     void unlock()
     {
         if (LIKELY(m_byte.compareExchangeWeak(isHeldBit, 0, std::memory_order_release))) {