Replace WTF::move with WTFMove
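
Background: WTFMove is WebKit's annotated replacement for WTF::move (itself a
std::move stand-in). Roughly (a sketch of the idea, not the verbatim
wtf/StdLibExtras.h source), it is the same rvalue cast plus compile-time
diagnostics for moves that are useless or likely mistakes:

    template<typename T>
    constexpr typename std::remove_reference<T>::type&& WTFMove(T&& value)
    {
        // Reject moves that are no-ops or likely bugs at compile time:
        // moving a temporary is unnecessary, and moving something const
        // silently degrades to a copy.
        static_assert(std::is_lvalue_reference<T>::value, "T is not an lvalue reference; move is unnecessary");
        static_assert(!std::is_const<typename std::remove_reference<T>::type>::value, "T is const qualified; the move will copy");
        return static_cast<typename std::remove_reference<T>::type&&>(value);
    }

In this file the affected call sites are the Promise sinks below (addReaction
and the suspend/resume/close entry points), where the promise is moved into the
per-state reaction list.
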
diff --git a/Source/WebCore/Modules/webaudio/AudioContext.cpp b/Source/WebCore/Modules/webaudio/AudioContext.cpp
index 714ab52..ef28f97 100644
--- a/Source/WebCore/Modules/webaudio/AudioContext.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioContext.cpp
 #include "DelayNode.h"
 #include "Document.h"
 #include "DynamicsCompressorNode.h"
+#include "EventNames.h"
 #include "ExceptionCode.h"
 #include "FFTFrame.h"
 #include "GainNode.h"
+#include "GenericEventQueue.h"
 #include "HRTFDatabaseLoader.h"
 #include "HRTFPanner.h"
+#include "JSDOMPromise.h"
 #include "OfflineAudioCompletionEvent.h"
 #include "OfflineAudioDestinationNode.h"
 #include "OscillatorNode.h"
+#include "Page.h"
 #include "PannerNode.h"
-#include "ScriptCallStack.h"
+#include "PeriodicWave.h"
+#include "ScriptController.h"
 #include "ScriptProcessorNode.h"
 #include "WaveShaperNode.h"
-#include "WaveTable.h"
-#include "WebCoreMemoryInstrumentation.h"
-#include <wtf/MemoryInstrumentationHashSet.h>
-#include <wtf/MemoryInstrumentationVector.h>
+#include <inspector/ScriptCallStack.h>
+#include <wtf/NeverDestroyed.h>
 
 #if ENABLE(MEDIA_STREAM)
 #include "MediaStream.h"
 #include "MediaStreamAudioDestinationNode.h"
+#include "MediaStreamAudioSource.h"
 #include "MediaStreamAudioSourceNode.h"
 #endif
 
 #include "GStreamerUtilities.h"
 #endif
 
-#include <wtf/ArrayBuffer.h>
+#if PLATFORM(IOS)
+#include "ScriptController.h"
+#include "Settings.h"
+#endif
+
+#include <runtime/ArrayBuffer.h>
 #include <wtf/Atomics.h>
 #include <wtf/MainThread.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
+#include <wtf/Ref.h>
 #include <wtf/RefCounted.h>
 #include <wtf/text/WTFString.h>
 
 // FIXME: check the proper way to reference an undefined thread ID
 const int UndefinedThreadIdentifier = 0xffffffff;
 
-const unsigned MaxNodesToDeletePerQuantum = 10;
+const unsigned MaxPeriodicWaveLength = 4096;
 
 namespace WebCore {
     
-namespace {
-    
-bool isSampleRateRangeGood(float sampleRate)
+bool AudioContext::isSampleRateRangeGood(float sampleRate)
 {
     // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
     // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
     return sampleRate >= 44100 && sampleRate <= 96000;
 }
 
-}
-
 // Don't allow more than this number of simultaneous AudioContexts talking to hardware.
 const unsigned MaxHardwareContexts = 4;
 unsigned AudioContext::s_hardwareContextCount = 0;
     
-PassRefPtr<AudioContext> AudioContext::create(Document* document, ExceptionCode& ec)
+RefPtr<AudioContext> AudioContext::create(Document& document, ExceptionCode& ec)
 {
     UNUSED_PARAM(ec);
 
-    ASSERT(document);
     ASSERT(isMainThread());
     if (s_hardwareContextCount >= MaxHardwareContexts)
-        return 0;
+        return nullptr;
 
     RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
     audioContext->suspendIfNeeded();
-    return audioContext.release();
-}
-
-PassRefPtr<AudioContext> AudioContext::createOfflineContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
-{
-    ASSERT(document);
-
-    // FIXME: offline contexts have limitations on supported sample-rates.
-    // Currently all AudioContexts must have the same sample-rate.
-    HRTFDatabaseLoader* loader = HRTFDatabaseLoader::loader();
-    if (numberOfChannels > 10 || !isSampleRateRangeGood(sampleRate) || (loader && loader->databaseSampleRate() != sampleRate)) {
-        ec = SYNTAX_ERR;
-        return 0;
-    }
-
-    RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document, numberOfChannels, numberOfFrames, sampleRate)));
-    audioContext->suspendIfNeeded();
-    return audioContext.release();
+    return audioContext;
 }
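
Note: create() now takes Document& and returns RefPtr instead of PassRefPtr. A
minimal sketch of the adoptRef contract it relies on, with a hypothetical Widget
standing in for AudioContext: a RefCounted object starts life with a reference
count of 1, and adoptRef() takes ownership of that initial reference instead of
adding another.

    #include <wtf/RefCounted.h>
    #include <wtf/RefPtr.h>

    class Widget : public RefCounted<Widget> {
    public:
        static RefPtr<Widget> create()
        {
            // adoptRef() must wrap the result of `new` exactly once; building
            // a RefPtr from the raw pointer directly would bump the count to 2
            // and the object would never be destroyed.
            return adoptRef(new Widget);
        }
    private:
        Widget() = default;
    };
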
 
 // Constructor for rendering to the audio hardware.
-AudioContext::AudioContext(Document* document)
-    : ActiveDOMObject(document, this)
-    , m_isStopScheduled(false)
-    , m_isInitialized(false)
-    , m_isAudioThreadFinished(false)
-    , m_destinationNode(0)
-    , m_isDeletionScheduled(false)
-    , m_automaticPullNodesNeedUpdating(false)
-    , m_connectionCount(0)
-    , m_audioThread(0)
+AudioContext::AudioContext(Document& document)
+    : ActiveDOMObject(&document)
+    , m_mediaSession(PlatformMediaSession::create(*this))
+    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
     , m_graphOwnerThread(UndefinedThreadIdentifier)
-    , m_isOfflineContext(false)
-    , m_activeSourceCount(0)
 {
     constructCommon();
 
     m_destinationNode = DefaultAudioDestinationNode::create(this);
 
-    // This sets in motion an asynchronous loading mechanism on another thread.
-    // We can check m_hrtfDatabaseLoader->isLoaded() to find out whether or not it has been fully loaded.
-    // It's not that useful to have a callback function for this since the audio thread automatically starts rendering on the graph
-    // when this has finished (see AudioDestinationNode).
-    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());
+    // Initialize the destination node's muted state to match the page's current muted state.
+    pageMutedStateDidChange();
 }
 
 // Constructor for offline (non-realtime) rendering.
-AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
-    : ActiveDOMObject(document, this)
-    , m_isStopScheduled(false)
-    , m_isInitialized(false)
-    , m_isAudioThreadFinished(false)
-    , m_destinationNode(0)
-    , m_automaticPullNodesNeedUpdating(false)
-    , m_connectionCount(0)
-    , m_audioThread(0)
-    , m_graphOwnerThread(UndefinedThreadIdentifier)
+AudioContext::AudioContext(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
+    : ActiveDOMObject(&document)
     , m_isOfflineContext(true)
-    , m_activeSourceCount(0)
+    , m_mediaSession(PlatformMediaSession::create(*this))
+    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
+    , m_graphOwnerThread(UndefinedThreadIdentifier)
 {
     constructCommon();
 
-    // FIXME: the passed in sampleRate MUST match the hardware sample-rate since HRTFDatabaseLoader is a singleton.
-    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);
-
     // Create a new destination for offline rendering.
     m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
     m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
@@ -204,6 +170,19 @@ void AudioContext::constructCommon()
     FFTFrame::initialize();
     
     m_listener = AudioListener::create();
+
+#if PLATFORM(IOS)
+    if (!document()->settings() || document()->settings()->requiresUserGestureForMediaPlayback())
+        addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
+    else
+        m_restrictions = NoRestrictions;
+#endif
+
+#if PLATFORM(COCOA)
+    addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
+#endif
+
+    m_mediaSession->setCanProduceAudio(true);
 }
 
 AudioContext::~AudioContext()
@@ -211,50 +190,55 @@ AudioContext::~AudioContext()
 #if DEBUG_AUDIONODE_REFERENCES
     fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
 #endif
-    // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
     ASSERT(!m_isInitialized);
     ASSERT(m_isStopScheduled);
-    ASSERT(!m_nodesToDelete.size());
-    ASSERT(!m_referencedNodes.size());
-    ASSERT(!m_finishedNodes.size());
-    ASSERT(!m_automaticPullNodes.size());
-    ASSERT(!m_renderingAutomaticPullNodes.size());
+    ASSERT(m_nodesToDelete.isEmpty());
+    ASSERT(m_referencedNodes.isEmpty());
+    ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion fails on tests sometimes.
+    ASSERT(m_automaticPullNodes.isEmpty());
+    if (m_automaticPullNodesNeedUpdating)
+        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
+    ASSERT(m_renderingAutomaticPullNodes.isEmpty());
+    // FIXME: Can we assert that m_deferredFinishDerefList is empty?
 }
 
 void AudioContext::lazyInitialize()
 {
-    if (!m_isInitialized) {
-        // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
-        ASSERT(!m_isAudioThreadFinished);
-        if (!m_isAudioThreadFinished) {
-            if (m_destinationNode.get()) {
-                m_destinationNode->initialize();
-
-                if (!isOfflineContext()) {
-                    // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
-                    // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
-                    // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
-                    // We may want to consider requiring it for symmetry with OfflineAudioContext.
-                    m_destinationNode->startRendering();                    
-                    ++s_hardwareContextCount;
-                }
-
-            }
-            m_isInitialized = true;
+    if (m_isInitialized)
+        return;
+
+    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
+    ASSERT(!m_isAudioThreadFinished);
+    if (m_isAudioThreadFinished)
+        return;
+
+    if (m_destinationNode.get()) {
+        m_destinationNode->initialize();
+
+        if (!isOfflineContext()) {
+            document()->addAudioProducer(this);
+
+            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
+            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
+            // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
+            // We may want to consider requiring it for symmetry with OfflineAudioContext.
+            startRendering();
+            ++s_hardwareContextCount;
         }
     }
+    m_isInitialized = true;
 }
 
 void AudioContext::clear()
 {
     // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
     if (m_destinationNode)
-        m_destinationNode.clear();
+        m_destinationNode = nullptr;
 
     // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
     do {
         deleteMarkedNodes();
-        m_nodesToDelete.append(m_nodesMarkedForDeletion);
+        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
         m_nodesMarkedForDeletion.clear();
     } while (m_nodesToDelete.size());
 
@@ -276,8 +260,13 @@ void AudioContext::uninitialize()
     m_isAudioThreadFinished = true;
 
     if (!isOfflineContext()) {
+        document()->removeAudioProducer(this);
+
         ASSERT(s_hardwareContextCount);
         --s_hardwareContextCount;
+
+        // Offline contexts move to 'Closed' state when dispatching the completion event.
+        setState(State::Closed);
     }
 
     // Get rid of the sources which may still be playing.
@@ -291,46 +280,109 @@ bool AudioContext::isInitialized() const
     return m_isInitialized;
 }
 
-bool AudioContext::isRunnable() const
+void AudioContext::addReaction(State state, Promise&& promise)
 {
-    if (!isInitialized())
-        return false;
-    
-    // Check with the HRTF spatialization system to see if it's finished loading.
-    return m_hrtfDatabaseLoader->isLoaded();
+    size_t stateIndex = static_cast<size_t>(state);
+    if (stateIndex >= m_stateReactions.size())
+        m_stateReactions.resize(stateIndex + 1);
+
+    m_stateReactions[stateIndex].append(WTFMove(promise));
 }
 
-void AudioContext::stopDispatch(void* userData)
+void AudioContext::setState(State state)
 {
-    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
-    ASSERT(context);
-    if (!context)
+    if (m_state == state)
+        return;
+
+    m_state = state;
+    m_eventQueue->enqueueEvent(Event::create(eventNames().statechangeEvent, true, false));
+
+    size_t stateIndex = static_cast<size_t>(state);
+    if (stateIndex >= m_stateReactions.size())
         return;
 
-    context->uninitialize();
-    context->clear();
+    Vector<Promise> reactions;
+    m_stateReactions[stateIndex].swap(reactions);
+
+    for (auto& promise : reactions)
+        promise.resolve(nullptr);
+}
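
Note: addReaction() and setState() above implement a small promise-parking
pattern. A standalone sketch with illustrative names (not WebKit API), using
std::function in place of the DOM Promise type:

    #include <cstddef>
    #include <functional>
    #include <utility>
    #include <vector>

    enum class State { Suspended, Running, Interrupted, Closed };

    struct StateReactions {
        // One bucket of pending resolvers per target state, grown lazily.
        std::vector<std::vector<std::function<void()>>> buckets;

        void add(State state, std::function<void()>&& resolve)
        {
            size_t index = static_cast<size_t>(state);
            if (index >= buckets.size())
                buckets.resize(index + 1);
            buckets[index].push_back(std::move(resolve));
        }

        void didTransitionTo(State state)
        {
            size_t index = static_cast<size_t>(state);
            if (index >= buckets.size())
                return;
            // Take the bucket first, mirroring the swap() above, so a resolver
            // that re-enters add() parks for the next transition instead of
            // being resolved inside this loop.
            auto pending = std::exchange(buckets[index], {});
            for (auto& resolve : pending)
                resolve();
        }
    };
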
+
+const AtomicString& AudioContext::state() const
+{
+    static NeverDestroyed<AtomicString> suspended("suspended");
+    static NeverDestroyed<AtomicString> running("running");
+    static NeverDestroyed<AtomicString> interrupted("interrupted");
+    static NeverDestroyed<AtomicString> closed("closed");
+
+    switch (m_state) {
+    case State::Suspended:
+        return suspended;
+    case State::Running:
+        return running;
+    case State::Interrupted:
+        return interrupted;
+    case State::Closed:
+        return closed;
+    }
+
+    ASSERT_NOT_REACHED();
+    return suspended;
 }
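
Note: the assumed reason state() wraps each constant in NeverDestroyed rather
than using a plain static AtomicString is WebKit's exit-time-destructor policy.
A comparative fragment (the two declarations are alternatives, not one program):

    // A plain function-local static registers an atexit handler for
    // ~AtomicString(), which builds using -Wexit-time-destructors reject.
    static const AtomicString running("running");

    // NeverDestroyed<T> constructs T in static storage and never runs ~T(),
    // so no atexit handler is registered and the returned reference stays
    // valid for the life of the process.
    static NeverDestroyed<AtomicString> running("running");
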
 
 void AudioContext::stop()
 {
+    ASSERT(isMainThread());
+
     // Usually ScriptExecutionContext calls stop twice.
     if (m_isStopScheduled)
         return;
     m_isStopScheduled = true;
 
+    document()->updateIsPlayingMedia();
+
+    m_eventQueue->close();
+
     // Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle
     // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
     // ActiveDOMObjects so let's schedule uninitialize() to be called later.
     // FIXME: see if there's a more direct way to handle this issue.
-    callOnMainThread(stopDispatch, this);
+    // FIXME: This sounds very wrong. The whole idea of stop() is that it stops everything, and if we
+    // schedule some observable work for later, the work likely happens at an inappropriate time.
+    callOnMainThread([this] {
+        uninitialize();
+        clear();
+    });
+}
+
+bool AudioContext::canSuspendForDocumentSuspension() const
+{
+    // FIXME: We should be able to suspend while rendering as well with some more code.
+    return m_state == State::Suspended || m_state == State::Closed;
+}
+
+const char* AudioContext::activeDOMObjectName() const
+{
+    return "AudioContext";
+}
+
+Document* AudioContext::document() const
+{
+    ASSERT(m_scriptExecutionContext);
+    return downcast<Document>(m_scriptExecutionContext);
+}
+
+const Document* AudioContext::hostingDocument() const
+{
+    return downcast<Document>(m_scriptExecutionContext);
 }
 
 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
 {
     RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
     if (!audioBuffer.get()) {
-        ec = SYNTAX_ERR;
-        return 0;
+        ec = NOT_SUPPORTED_ERR;
+        return nullptr;
     }
 
     return audioBuffer;
@@ -341,13 +393,13 @@ PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, boo
     ASSERT(arrayBuffer);
     if (!arrayBuffer) {
         ec = SYNTAX_ERR;
-        return 0;
+        return nullptr;
     }
 
     RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
     if (!audioBuffer.get()) {
         ec = SYNTAX_ERR;
-        return 0;
+        return nullptr;
     }
 
     return audioBuffer;
@@ -381,7 +433,7 @@ PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(H
     ASSERT(mediaElement);
     if (!mediaElement) {
         ec = INVALID_STATE_ERR;
-        return 0;
+        return nullptr;
     }
         
     ASSERT(isMainThread());
@@ -390,7 +442,7 @@ PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(H
     // First check if this media element already has a source node.
     if (mediaElement->audioSourceNode()) {
         ec = INVALID_STATE_ERR;
-        return 0;
+        return nullptr;
     }
         
     RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
@@ -405,31 +457,40 @@ PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(H
 #if ENABLE(MEDIA_STREAM)
 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec)
 {
+    ASSERT(isMainThread());
+
     ASSERT(mediaStream);
     if (!mediaStream) {
         ec = INVALID_STATE_ERR;
-        return 0;
+        return nullptr;
     }
 
-    ASSERT(isMainThread());
-    lazyInitialize();
+    auto audioTracks = mediaStream->getAudioTracks();
+    if (audioTracks.isEmpty()) {
+        ec = INVALID_STATE_ERR;
+        return nullptr;
+    }
 
-    AudioSourceProvider* provider = 0;
+    MediaStreamTrack* providerTrack = nullptr;
+    for (auto& track : audioTracks) {
+        if (track->audioSourceProvider()) {
+            providerTrack = track.get();
+            break;
+        }
+    }
 
-    if (mediaStream->isLocal() && mediaStream->audioTracks()->length())
-        provider = destination()->localAudioInputProvider();
-    else {
-        // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
-        provider = 0;
+    if (!providerTrack) {
+        ec = INVALID_STATE_ERR;
+        return nullptr;
     }
 
-    RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, provider);
+    lazyInitialize();
 
-    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
+    auto node = MediaStreamAudioSourceNode::create(*this, *mediaStream, *providerTrack);
     node->setFormat(2, sampleRate());
 
-    refNode(node.get()); // context keeps reference until node is disconnected
-    return node;
+    refNode(&node.get()); // context keeps reference until node is disconnected
+    return &node.get();
 }
 
 PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
@@ -460,8 +521,8 @@ PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe
     RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
 
     if (!node.get()) {
-        ec = SYNTAX_ERR;
-        return 0;
+        ec = INDEX_SIZE_ERR;
+        return nullptr;
     }
 
     refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
@@ -529,7 +590,7 @@ PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionCo
     lazyInitialize();
     RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, ec);
     if (ec)
-        return 0;
+        return nullptr;
     return node;
 }
 
@@ -548,7 +609,7 @@ PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numbe
 
     if (!node.get()) {
         ec = SYNTAX_ERR;
-        return 0;
+        return nullptr;
     }
 
     return node;
@@ -569,7 +630,7 @@ PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfI
 
     if (!node.get()) {
         ec = SYNTAX_ERR;
-        return 0;
+        return nullptr;
     }
 
     return node;
@@ -589,17 +650,17 @@ PassRefPtr<OscillatorNode> AudioContext::createOscillator()
     return node;
 }
 
-PassRefPtr<WaveTable> AudioContext::createWaveTable(Float32Array* real, Float32Array* imag, ExceptionCode& ec)
+PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionCode& ec)
 {
     ASSERT(isMainThread());
     
-    if (!real || !imag || (real->length() != imag->length())) {
+    if (!real || !imag || (real->length() != imag->length() || (real->length() > MaxPeriodicWaveLength) || (real->length() <= 0))) {
         ec = SYNTAX_ERR;
-        return 0;
+        return nullptr;
     }
     
     lazyInitialize();
-    return WaveTable::create(sampleRate(), real, imag);
+    return PeriodicWave::create(sampleRate(), real, imag);
 }
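
Note: a hypothetical caller-side sketch of the tightened validation (context is
an assumed AudioContext*; the exact Float32Array factory signature may differ by
era). The arrays must be non-null, of equal non-zero length, and no longer than
MaxPeriodicWaveLength (4096):

    ExceptionCode ec = 0;
    RefPtr<Float32Array> real = Float32Array::create(2);
    RefPtr<Float32Array> imag = Float32Array::create(2);
    real->data()[0] = 0; // no DC offset
    real->data()[1] = 0;
    imag->data()[0] = 0;
    imag->data()[1] = 1; // pure sine at the fundamental frequency
    RefPtr<PeriodicWave> wave = context->createPeriodicWave(real.get(), imag.get(), ec);
    ASSERT(!ec && wave);

    // Null arrays, mismatched or zero lengths, or lengths above 4096 set
    // SYNTAX_ERR and return nullptr instead.
    RefPtr<PeriodicWave> bad = context->createPeriodicWave(real.get(), nullptr, ec);
    ASSERT(ec == SYNTAX_ERR && !bad);
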
 
 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
@@ -612,8 +673,8 @@ void AudioContext::derefFinishedSourceNodes()
 {
     ASSERT(isGraphOwner());
     ASSERT(isAudioThread() || isAudioThreadFinished());
-    for (unsigned i = 0; i < m_finishedNodes.size(); i++)
-        derefNode(m_finishedNodes[i]);
+    for (auto& node : m_finishedNodes)
+        derefNode(node);
 
     m_finishedNodes.clear();
 }
@@ -621,7 +682,7 @@ void AudioContext::derefFinishedSourceNodes()
 void AudioContext::refNode(AudioNode* node)
 {
     ASSERT(isMainThread());
-    AutoLocker locker(this);
+    AutoLocker locker(*this);
     
     node->ref(AudioNode::RefTypeConnection);
     m_referencedNodes.append(node);
@@ -633,19 +694,15 @@ void AudioContext::derefNode(AudioNode* node)
     
     node->deref(AudioNode::RefTypeConnection);
 
-    for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
-        if (node == m_referencedNodes[i]) {
-            m_referencedNodes.remove(i);
-            break;
-        }
-    }
+    ASSERT(m_referencedNodes.contains(node));
+    m_referencedNodes.removeFirst(node);
 }
 
 void AudioContext::derefUnfinishedSourceNodes()
 {
     ASSERT(isMainThread() && isAudioThreadFinished());
-    for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
-        m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
+    for (auto& node : m_referencedNodes)
+        node->deref(AudioNode::RefTypeConnection);
 
     m_referencedNodes.clear();
 }
@@ -728,7 +785,7 @@ void AudioContext::addDeferredFinishDeref(AudioNode* node)
 void AudioContext::handlePreRenderTasks()
 {
     ASSERT(isAudioThread());
+
     // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
     // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
     bool mustReleaseLock;
@@ -747,8 +804,8 @@ void AudioContext::handlePreRenderTasks()
 void AudioContext::handlePostRenderTasks()
 {
     ASSERT(isAudioThread());
-    // Must use a tryLock() here too.  Don't worry, the lock will very rarely be contended and this method is called frequently.
+
+    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
     // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
     // from the render graph (in which case they'll render silence).
     bool mustReleaseLock;
@@ -777,10 +834,8 @@ void AudioContext::handlePostRenderTasks()
 void AudioContext::handleDeferredFinishDerefs()
 {
     ASSERT(isAudioThread() && isGraphOwner());
-    for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
-        AudioNode* node = m_deferredFinishDerefList[i];
+    for (auto& node : m_deferredFinishDerefList)
         node->finishDeref(AudioNode::RefTypeConnection);
-    }
     
     m_deferredFinishDerefList.clear();
 }
@@ -810,41 +865,29 @@ void AudioContext::scheduleNodeDeletion()
 
     // Make sure to call deleteMarkedNodes() on main thread.    
     if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
-        m_nodesToDelete.append(m_nodesMarkedForDeletion);
+        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
         m_nodesMarkedForDeletion.clear();
 
         m_isDeletionScheduled = true;
 
-        // Don't let ourself get deleted before the callback.
-        // See matching deref() in deleteMarkedNodesDispatch().
-        ref();
-        callOnMainThread(deleteMarkedNodesDispatch, this);
+        RefPtr<AudioContext> strongThis(this);
+        callOnMainThread([strongThis] {
+            strongThis->deleteMarkedNodes();
+        });
     }
 }
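
Note: the deleteMarkedNodesDispatch() trampoline removed below is subsumed by
the lambda. The lifetime rule this sketch assumes: capture a RefPtr by value
whenever the task may run after the caller's last reference is dropped.

    // Can dangle: nothing keeps the context alive across the thread hop.
    callOnMainThread([this] { deleteMarkedNodes(); });

    // Safe: the by-value RefPtr keeps the context alive until the lambda has
    // run on the main thread and been destroyed.
    RefPtr<AudioContext> strongThis(this);
    callOnMainThread([strongThis] { strongThis->deleteMarkedNodes(); });
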
 
-void AudioContext::deleteMarkedNodesDispatch(void* userData)
-{
-    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
-    ASSERT(context);
-    if (!context)
-        return;
-
-    context->deleteMarkedNodes();
-    context->deref();
-}
-
 void AudioContext::deleteMarkedNodes()
 {
     ASSERT(isMainThread());
 
     // Protect this object from being deleted before we release the mutex locked by AutoLocker.
-    RefPtr<AudioContext> protect(this);
+    Ref<AudioContext> protect(*this);
     {
-        AutoLocker locker(this);
+        AutoLocker locker(*this);
 
-        while (size_t n = m_nodesToDelete.size()) {
-            AudioNode* node = m_nodesToDelete[n - 1];
-            m_nodesToDelete.removeLast();
+        while (m_nodesToDelete.size()) {
+            AudioNode* node = m_nodesToDelete.takeLast();
 
             // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
             unsigned numberOfInputs = node->numberOfInputs();
@@ -872,7 +915,7 @@ void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunctio
 void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
 {
     ASSERT(isMainThread());
-    AutoLocker locker(this);
+    AutoLocker locker(*this);
     m_dirtySummingJunctions.remove(summingJunction);
 }
 
@@ -886,8 +929,8 @@ void AudioContext::handleDirtyAudioSummingJunctions()
 {
     ASSERT(isGraphOwner());    
 
-    for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
-        (*i)->updateRenderingState();
+    for (auto& junction : m_dirtySummingJunctions)
+        junction->updateRenderingState();
 
     m_dirtySummingJunctions.clear();
 }
@@ -896,8 +939,8 @@ void AudioContext::handleDirtyAudioNodeOutputs()
 {
     ASSERT(isGraphOwner());    
 
-    for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
-        (*i)->updateRenderingState();
+    for (auto& output : m_dirtyAudioNodeOutputs)
+        output->updateRenderingState();
 
     m_dirtyAudioNodeOutputs.clear();
 }
@@ -906,20 +949,16 @@ void AudioContext::addAutomaticPullNode(AudioNode* node)
 {
     ASSERT(isGraphOwner());
 
-    if (!m_automaticPullNodes.contains(node)) {
-        m_automaticPullNodes.add(node);
+    if (m_automaticPullNodes.add(node).isNewEntry)
         m_automaticPullNodesNeedUpdating = true;
-    }
 }
 
 void AudioContext::removeAutomaticPullNode(AudioNode* node)
 {
     ASSERT(isGraphOwner());
 
-    if (m_automaticPullNodes.contains(node)) {
-        m_automaticPullNodes.remove(node);
+    if (m_automaticPullNodes.remove(node))
         m_automaticPullNodesNeedUpdating = true;
-    }
 }
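
Note: both functions now lean on WTF::HashSet's return values, so the old
contains() pre-checks go away. A minimal sketch (markDirty() and node are
illustrative):

    HashSet<AudioNode*> nodes;

    // AddResult::isNewEntry is true only for a first insertion...
    if (nodes.add(node).isNewEntry)
        markDirty();

    // ...and remove() returns true only if the value was present, so each
    // path marks dirty at most once per real mutation.
    if (nodes.remove(node))
        markDirty();
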
 
 void AudioContext::updateAutomaticPullNodes()
@@ -930,11 +969,9 @@ void AudioContext::updateAutomaticPullNodes()
         // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
 
-        unsigned j = 0;
-        for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
-            AudioNode* output = *i;
-            m_renderingAutomaticPullNodes[j] = output;
-        }
+        unsigned i = 0;
+        for (auto& output : m_automaticPullNodes)
+            m_renderingAutomaticPullNodes[i++] = output;
 
         m_automaticPullNodesNeedUpdating = false;
     }
@@ -944,23 +981,105 @@ void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
 {
     ASSERT(isAudioThread());
 
-    for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
-        m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
+    for (auto& node : m_renderingAutomaticPullNodes)
+        node->processIfNecessary(framesToProcess);
 }
 
-const AtomicString& AudioContext::interfaceName() const
+ScriptExecutionContext* AudioContext::scriptExecutionContext() const
 {
-    return eventNames().interfaceForAudioContext;
+    return m_isStopScheduled ? 0 : ActiveDOMObject::scriptExecutionContext();
 }
 
-ScriptExecutionContext* AudioContext::scriptExecutionContext() const
+void AudioContext::nodeWillBeginPlayback()
 {
-    return m_isStopScheduled ? 0 : ActiveDOMObject::scriptExecutionContext();
+    // Called by scheduled AudioNodes when clients schedule their start times.
+    // Prior to the introduction of suspend(), resume(), and stop(), starting
+    // a scheduled AudioNode would remove the user-gesture restriction, if present,
+    // and would thus unmute the context. Now that AudioContext stays in the
+    // "suspended" state if a user-gesture restriction is present, starting a
+    // scheduled AudioNode should set the state to "running", but only if the
+    // user-gesture restriction is set.
+    if (userGestureRequiredForAudioStart())
+        startRendering();
+}
+
+bool AudioContext::willBeginPlayback()
+{
+    if (userGestureRequiredForAudioStart()) {
+        if (!ScriptController::processingUserGestureForMedia())
+            return false;
+        removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
+    }
+
+    if (pageConsentRequiredForAudioStart()) {
+        Page* page = document()->page();
+        if (page && !page->canStartMedia()) {
+            document()->addMediaCanStartListener(this);
+            return false;
+        }
+        removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+    }
+
+    return m_mediaSession->clientWillBeginPlayback();
+}
+
+bool AudioContext::willPausePlayback()
+{
+    if (userGestureRequiredForAudioStart()) {
+        if (!ScriptController::processingUserGestureForMedia())
+            return false;
+        removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
+    }
+
+    if (pageConsentRequiredForAudioStart()) {
+        Page* page = document()->page();
+        if (page && !page->canStartMedia()) {
+            document()->addMediaCanStartListener(this);
+            return false;
+        }
+        removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+    }
+    
+    return m_mediaSession->clientWillPausePlayback();
 }
 
 void AudioContext::startRendering()
 {
+    if (!willBeginPlayback())
+        return;
+
     destination()->startRendering();
+    setState(State::Running);
+}
+
+void AudioContext::mediaCanStart()
+{
+    removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+}
+
+MediaProducer::MediaStateFlags AudioContext::mediaState() const
+{
+    if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
+        return MediaProducer::IsPlayingAudio;
+
+    return MediaProducer::IsNotPlaying;
+}
+
+void AudioContext::pageMutedStateDidChange()
+{
+    if (m_destinationNode && document()->page())
+        m_destinationNode->setMuted(document()->page()->isMuted());
+}
+
+void AudioContext::isPlayingAudioDidChange()
+{
+    // Make sure to call Document::updateIsPlayingMedia() on the main thread, since
+    // we could be on the audio I/O thread here and the call into WebCore could block.
+    RefPtr<AudioContext> strongThis(this);
+    callOnMainThread([strongThis] {
+        if (strongThis->document())
+            strongThis->document()->updateIsPlayingMedia();
+    });
 }
 
 void AudioContext::fireCompletionEvent()
@@ -970,6 +1089,7 @@ void AudioContext::fireCompletionEvent()
         return;
         
     AudioBuffer* renderedBuffer = m_renderTarget.get();
+    setState(State::Closed);
 
     ASSERT(renderedBuffer);
     if (!renderedBuffer)
@@ -978,44 +1098,146 @@ void AudioContext::fireCompletionEvent()
     // Avoid firing the event if the document has already gone away.
     if (scriptExecutionContext()) {
         // Call the offline rendering completion event listener.
-        dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
+        m_eventQueue->enqueueEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
     }
 }
 
 void AudioContext::incrementActiveSourceCount()
 {
-    atomicIncrement(&m_activeSourceCount);
+    ++m_activeSourceCount;
 }
 
 void AudioContext::decrementActiveSourceCount()
 {
-    atomicDecrement(&m_activeSourceCount);
+    --m_activeSourceCount;
 }
 
-void AudioContext::reportMemoryUsage(MemoryObjectInfo* memoryObjectInfo) const
+void AudioContext::suspend(Promise&& promise)
 {
-    AutoLocker locker(const_cast<AudioContext*>(this));
+    if (isOfflineContext()) {
+        promise.reject(INVALID_STATE_ERR);
+        return;
+    }
+
+    if (m_state == State::Suspended) {
+        promise.resolve(nullptr);
+        return;
+    }
+
+    if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
+        promise.reject(0);
+        return;
+    }
+
+    addReaction(State::Suspended, WTFMove(promise));
+
+    if (!willPausePlayback())
+        return;
+
+    lazyInitialize();
 
-    MemoryClassInfo info(memoryObjectInfo, this, WebCoreMemoryTypes::Audio);
-    ActiveDOMObject::reportMemoryUsage(memoryObjectInfo);
-    info.addMember(m_destinationNode);
-    info.addMember(m_listener);
-    info.addMember(m_finishedNodes);
-    info.addMember(m_referencedNodes);
-    info.addMember(m_nodesMarkedForDeletion);
-    info.addMember(m_nodesToDelete);
-    info.addMember(m_dirtySummingJunctions);
-    info.addMember(m_dirtyAudioNodeOutputs);
-    info.addMember(m_automaticPullNodes);
-    info.addMember(m_renderingAutomaticPullNodes);
-    info.addMember(m_contextGraphMutex);
-    info.addMember(m_deferredFinishDerefList);
-    info.addMember(m_hrtfDatabaseLoader);
-    info.addMember(m_eventTargetData);
-    info.addMember(m_renderTarget);
-    info.addMember(m_audioDecoder);
+    RefPtr<AudioContext> strongThis(this);
+    m_destinationNode->suspend([strongThis] {
+        strongThis->setState(State::Suspended);
+    });
 }
 
+void AudioContext::resume(Promise&& promise)
+{
+    if (isOfflineContext()) {
+        promise.reject(INVALID_STATE_ERR);
+        return;
+    }
+
+    if (m_state == State::Running) {
+        promise.resolve(nullptr);
+        return;
+    }
+
+    if (m_state == State::Closed || !m_destinationNode) {
+        promise.reject(0);
+        return;
+    }
+
+    addReaction(State::Running, WTFMove(promise));
+
+    if (!willBeginPlayback())
+        return;
+
+    lazyInitialize();
+
+    RefPtr<AudioContext> strongThis(this);
+    m_destinationNode->resume([strongThis] {
+        strongThis->setState(State::Running);
+    });
+}
+
+void AudioContext::close(Promise&& promise)
+{
+    if (isOfflineContext()) {
+        promise.reject(INVALID_STATE_ERR);
+        return;
+    }
+
+    if (m_state == State::Closed || !m_destinationNode) {
+        promise.resolve(nullptr);
+        return;
+    }
+
+    addReaction(State::Closed, WTFMove(promise));
+
+    lazyInitialize();
+
+    RefPtr<AudioContext> strongThis(this);
+    m_destinationNode->close([strongThis] {
+        strongThis->setState(State::Closed);
+        strongThis->uninitialize();
+    });
+}
+
+
+void AudioContext::suspendPlayback()
+{
+    if (!m_destinationNode || m_state == State::Closed)
+        return;
+
+    if (m_state == State::Suspended) {
+        if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
+            setState(State::Interrupted);
+        return;
+    }
+
+    lazyInitialize();
+
+    RefPtr<AudioContext> strongThis(this);
+    m_destinationNode->suspend([strongThis] {
+        bool interrupted = strongThis->m_mediaSession->state() == PlatformMediaSession::Interrupted;
+        strongThis->setState(interrupted ? State::Interrupted : State::Suspended);
+    });
+}
+
+void AudioContext::mayResumePlayback(bool shouldResume)
+{
+    if (!m_destinationNode || m_state == State::Closed || m_state == State::Running)
+        return;
+
+    if (!shouldResume) {
+        setState(State::Suspended);
+        return;
+    }
+
+    if (!willBeginPlayback())
+        return;
+
+    lazyInitialize();
+
+    RefPtr<AudioContext> strongThis(this);
+    m_destinationNode->resume([strongThis] {
+        strongThis->setState(State::Running);
+    });
+}
+
+
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)