Replace WTF::move with WTFMove
diff --git a/Source/WebCore/Modules/webaudio/AudioContext.cpp b/Source/WebCore/Modules/webaudio/AudioContext.cpp
index 9b3cfcb..ef28f97 100644
--- a/Source/WebCore/Modules/webaudio/AudioContext.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioContext.cpp
@@ -51,6 +51,7 @@
 #include "GenericEventQueue.h"
 #include "HRTFDatabaseLoader.h"
 #include "HRTFPanner.h"
+#include "JSDOMPromise.h"
 #include "OfflineAudioCompletionEvent.h"
 #include "OfflineAudioDestinationNode.h"
 #include "OscillatorNode.h"
@@ -123,13 +124,13 @@ RefPtr<AudioContext> AudioContext::create(Document& document, ExceptionCode& ec)
 
     RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
     audioContext->suspendIfNeeded();
-    return audioContext.release();
+    return audioContext;
 }
 
 // Constructor for rendering to the audio hardware.
 AudioContext::AudioContext(Document& document)
     : ActiveDOMObject(&document)
-    , m_mediaSession(MediaSession::create(*this))
+    , m_mediaSession(PlatformMediaSession::create(*this))
     , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
     , m_graphOwnerThread(UndefinedThreadIdentifier)
 {
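
The create() change above drops the explicit release(): since C++11, returning a local smart pointer by name is an implicit move, so there is no refcount churn for release() to optimize away. A minimal standalone sketch, with std::shared_ptr standing in for WTF::RefPtr:

    #include <memory>

    struct AudioContextLike { }; // hypothetical stand-in for WebCore::AudioContext

    std::shared_ptr<AudioContextLike> create()
    {
        auto audioContext = std::make_shared<AudioContextLike>();
        // Returning the local by name is an implicit move (or elided
        // entirely), so the old explicit release() added nothing.
        return audioContext;
    }
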
@@ -145,7 +146,7 @@ AudioContext::AudioContext(Document& document)
 AudioContext::AudioContext(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
     : ActiveDOMObject(&document)
     , m_isOfflineContext(true)
-    , m_mediaSession(MediaSession::create(*this))
+    , m_mediaSession(PlatformMediaSession::create(*this))
     , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
     , m_graphOwnerThread(UndefinedThreadIdentifier)
 {
@@ -171,7 +172,7 @@ void AudioContext::constructCommon()
     m_listener = AudioListener::create();
 
 #if PLATFORM(IOS)
-    if (!document()->settings() || document()->settings()->mediaPlaybackRequiresUserGesture())
+    if (!document()->settings() || document()->settings()->requiresUserGestureForMediaPlayback())
         addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
     else
         m_restrictions = NoRestrictions;
@@ -180,6 +181,8 @@ void AudioContext::constructCommon()
 #if PLATFORM(COCOA)
     addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
 #endif
+
+    m_mediaSession->setCanProduceAudio(true);
 }
 
 AudioContext::~AudioContext()
@@ -187,49 +190,50 @@ AudioContext::~AudioContext()
 #if DEBUG_AUDIONODE_REFERENCES
     fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
 #endif
-    // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
     ASSERT(!m_isInitialized);
     ASSERT(m_isStopScheduled);
-    ASSERT(!m_nodesToDelete.size());
-    ASSERT(!m_referencedNodes.size());
-    ASSERT(!m_finishedNodes.size());
-    ASSERT(!m_automaticPullNodes.size());
+    ASSERT(m_nodesToDelete.isEmpty());
+    ASSERT(m_referencedNodes.isEmpty());
+    ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion fails on tests sometimes.
+    ASSERT(m_automaticPullNodes.isEmpty());
     if (m_automaticPullNodesNeedUpdating)
         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
-    ASSERT(!m_renderingAutomaticPullNodes.size());
+    ASSERT(m_renderingAutomaticPullNodes.isEmpty());
+    // FIXME: Can we assert that m_deferredFinishDerefList is empty?
 }
 
 void AudioContext::lazyInitialize()
 {
-    if (!m_isInitialized) {
-        // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
-        ASSERT(!m_isAudioThreadFinished);
-        if (!m_isAudioThreadFinished) {
-            if (m_destinationNode.get()) {
-                m_destinationNode->initialize();
-
-                if (!isOfflineContext()) {
-                    document()->addAudioProducer(this);
-
-                    // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
-                    // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
-                    // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
-                    // We may want to consider requiring it for symmetry with OfflineAudioContext.
-                    startRendering();
-                    ++s_hardwareContextCount;
-                }
-
-            }
-            m_isInitialized = true;
+    if (m_isInitialized)
+        return;
+
+    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
+    ASSERT(!m_isAudioThreadFinished);
+    if (m_isAudioThreadFinished)
+        return;
+
+    if (m_destinationNode.get()) {
+        m_destinationNode->initialize();
+
+        if (!isOfflineContext()) {
+            document()->addAudioProducer(this);
+
+            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
+            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
+            // NOTE: For now, the default AudioContext does not need an explicit startRendering() call from JavaScript.
+            // We may want to consider requiring it for symmetry with OfflineAudioContext.
+            startRendering();
+            ++s_hardwareContextCount;
         }
     }
+    m_isInitialized = true;
 }
 
 void AudioContext::clear()
 {
     // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
     if (m_destinationNode)
-        m_destinationNode.clear();
+        m_destinationNode = nullptr;
 
     // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
     do {
@@ -276,13 +280,13 @@ bool AudioContext::isInitialized() const
     return m_isInitialized;
 }
 
-void AudioContext::addReaction(State state, std::function<void()> reaction)
+void AudioContext::addReaction(State state, Promise&& promise)
 {
     size_t stateIndex = static_cast<size_t>(state);
     if (stateIndex >= m_stateReactions.size())
         m_stateReactions.resize(stateIndex + 1);
 
-    m_stateReactions[stateIndex].append(reaction);
+    m_stateReactions[stateIndex].append(WTFMove(promise));
 }
 
 void AudioContext::setState(State state)
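
This hunk is the patch's namesake: WTF::move becomes WTFMove. WTFMove is essentially std::move plus compile-time guards; an approximate sketch of its definition (the real one lives in WTF's StdLibExtras.h):

    #include <type_traits>
    #include <utility>

    // Approximate sketch of WTFMove: std::move plus static_asserts that
    // reject moves which are useless (rvalue argument) or would silently
    // degrade into copies (const argument).
    template<typename T>
    typename std::remove_reference<T>::type&& WTFMove(T&& value)
    {
        static_assert(std::is_lvalue_reference<T>::value,
            "T is not an lvalue reference; WTFMove() is unnecessary.");
        static_assert(!std::is_const<typename std::remove_reference<T>::type>::value,
            "T is const qualified; the move would silently copy.");
        return std::move(value);
    }
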
@@ -297,11 +301,11 @@ void AudioContext::setState(State state)
     if (stateIndex >= m_stateReactions.size())
         return;
 
-    Vector<std::function<void()>> reactions;
+    Vector<Promise> reactions;
     m_stateReactions[stateIndex].swap(reactions);
 
-    for (auto& reaction : reactions)
-        reaction();
+    for (auto& promise : reactions)
+        promise.resolve(nullptr);
 }
 
 const AtomicString& AudioContext::state() const
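
addReaction() and setState() now carry promises instead of bare std::functions, but the queue mechanics are unchanged: one vector per state, swapped out before running so a re-entrant addReaction() is safe. A standalone sketch, with a plain callback standing in for the DOM promise:

    #include <cstddef>
    #include <functional>
    #include <utility>
    #include <vector>

    enum class State { Suspended, Running, Closed };

    // Hypothetical stand-in for the per-state promise queues.
    class StateReactions {
    public:
        void addReaction(State state, std::function<void()>&& resolve)
        {
            std::size_t index = static_cast<std::size_t>(state);
            if (index >= m_reactions.size())
                m_reactions.resize(index + 1);
            m_reactions[index].push_back(std::move(resolve));
        }

        void setState(State state)
        {
            std::size_t index = static_cast<std::size_t>(state);
            if (index >= m_reactions.size())
                return;
            // Swap the queue out first so a reaction that re-enters
            // addReaction() cannot invalidate the iteration.
            std::vector<std::function<void()>> reactions;
            m_reactions[index].swap(reactions);
            for (auto& resolve : reactions)
                resolve(); // promise.resolve(nullptr) in the real code
        }

    private:
        std::vector<std::vector<std::function<void()>>> m_reactions;
    };
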
@@ -326,25 +330,16 @@ const AtomicString& AudioContext::state() const
     return suspended;
 }
 
-void AudioContext::stopDispatch(void* userData)
-{
-    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
-    ASSERT(context);
-    if (!context)
-        return;
-
-    context->uninitialize();
-    context->clear();
-}
-
 void AudioContext::stop()
 {
+    ASSERT(isMainThread());
+
     // Usually ScriptExecutionContext calls stop twice.
     if (m_isStopScheduled)
         return;
     m_isStopScheduled = true;
 
-    document()->updateIsPlayingAudio();
+    document()->updateIsPlayingMedia();
 
     m_eventQueue->close();
 
@@ -352,10 +347,15 @@ void AudioContext::stop()
     // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
     // ActiveDOMObjects so let's schedule uninitialize() to be called later.
     // FIXME: see if there's a more direct way to handle this issue.
-    callOnMainThread(stopDispatch, this);
+    // FIXME: This sounds very wrong. The whole idea of stop() is that it stops everything, and if we
+    // schedule some observable work for later, the work likely happens at an inappropriate time.
+    callOnMainThread([this] {
+        uninitialize();
+        clear();
+    });
 }
 
-bool AudioContext::canSuspend() const
+bool AudioContext::canSuspendForDocumentSuspension() const
 {
     // FIXME: We should be able to suspend while rendering as well with some more code.
     return m_state == State::Suspended || m_state == State::Closed;
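
stopDispatch() can be deleted because callOnMainThread() accepts an arbitrary callable, letting a capturing lambda replace the function-pointer-plus-void* trampoline. A sketch of the old and new shapes, with a plain queue standing in for the main-thread run loop:

    #include <functional>
    #include <queue>
    #include <utility>

    // Plain queue standing in for WTF's main-thread dispatch machinery.
    static std::queue<std::function<void()>> mainThreadTasks;

    // Old shape: a free function plus an untyped user-data pointer.
    void callOnMainThread(void (*function)(void*), void* userData)
    {
        mainThreadTasks.push([function, userData] { function(userData); });
    }

    // New shape: any callable, so lambda captures replace the void*
    // round trip and static trampolines like stopDispatch() disappear.
    void callOnMainThread(std::function<void()>&& task)
    {
        mainThreadTasks.push(std::move(task));
    }
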
@@ -372,6 +372,11 @@ Document* AudioContext::document() const
     return downcast<Document>(m_scriptExecutionContext);
 }
 
+const Document* AudioContext::hostingDocument() const
+{
+    return downcast<Document>(m_scriptExecutionContext);
+}
+
 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
 {
     RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
@@ -452,39 +457,40 @@ PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(H
 #if ENABLE(MEDIA_STREAM)
 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec)
 {
+    ASSERT(isMainThread());
+
     ASSERT(mediaStream);
     if (!mediaStream) {
         ec = INVALID_STATE_ERR;
         return nullptr;
     }
 
-    ASSERT(isMainThread());
-    lazyInitialize();
-
-    AudioSourceProvider* provider = 0;
-
-    Vector<RefPtr<MediaStreamTrack>> audioTracks = mediaStream->getAudioTracks();
-    RefPtr<MediaStreamTrack> audioTrack;
+    auto audioTracks = mediaStream->getAudioTracks();
+    if (audioTracks.isEmpty()) {
+        ec = INVALID_STATE_ERR;
+        return nullptr;
+    }
 
-    // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
-    for (size_t i = 0; i < audioTracks.size(); ++i) {
-        audioTrack = audioTracks[i];
-        if (audioTrack->source()->isAudioStreamSource()) {
-            auto source = static_cast<MediaStreamAudioSource*>(audioTrack->source());
-            ASSERT(!source->deviceId().isEmpty());
-            destination()->enableInput(source->deviceId());
-            provider = destination()->localAudioInputProvider();
+    MediaStreamTrack* providerTrack = nullptr;
+    for (auto& track : audioTracks) {
+        if (track->audioSourceProvider()) {
+            providerTrack = track.get();
             break;
         }
     }
 
-    RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider);
+    if (!providerTrack) {
+        ec = INVALID_STATE_ERR;
+        return nullptr;
+    }
 
-    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
+    lazyInitialize();
+
+    auto node = MediaStreamAudioSourceNode::create(*this, *mediaStream, *providerTrack);
     node->setFormat(2, sampleRate());
 
-    refNode(node.get()); // context keeps reference until node is disconnected
-    return node;
+    refNode(&node.get()); // context keeps reference until node is disconnected
+    return &node.get();
 }
 
 PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
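
The rewritten createMediaStreamSource() no longer downcasts to MediaStreamAudioSource and pokes at device IDs; it takes the first audio track that can hand out an AudioSourceProvider and rejects otherwise. The selection logic reduced to a sketch, with hypothetical stand-in types:

    #include <memory>
    #include <vector>

    // Hypothetical stand-ins for AudioSourceProvider and MediaStreamTrack.
    struct AudioSourceProviderLike { };

    struct TrackLike {
        AudioSourceProviderLike* audioSourceProvider() const { return m_provider.get(); }
        std::unique_ptr<AudioSourceProviderLike> m_provider;
    };

    // The first audio track that can supply samples wins; when none can,
    // the caller fails with INVALID_STATE_ERR, as in the patched function.
    TrackLike* firstProviderTrack(std::vector<std::unique_ptr<TrackLike>>& audioTracks)
    {
        for (auto& track : audioTracks) {
            if (track->audioSourceProvider())
                return track.get();
        }
        return nullptr;
    }
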
@@ -667,8 +673,8 @@ void AudioContext::derefFinishedSourceNodes()
 {
     ASSERT(isGraphOwner());
     ASSERT(isAudioThread() || isAudioThreadFinished());
-    for (unsigned i = 0; i < m_finishedNodes.size(); i++)
-        derefNode(m_finishedNodes[i]);
+    for (auto& node : m_finishedNodes)
+        derefNode(node);
 
     m_finishedNodes.clear();
 }
@@ -688,19 +694,15 @@ void AudioContext::derefNode(AudioNode* node)
     
     node->deref(AudioNode::RefTypeConnection);
 
-    for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
-        if (node == m_referencedNodes[i]) {
-            m_referencedNodes.remove(i);
-            break;
-        }
-    }
+    ASSERT(m_referencedNodes.contains(node));
+    m_referencedNodes.removeFirst(node);
 }
 
 void AudioContext::derefUnfinishedSourceNodes()
 {
     ASSERT(isMainThread() && isAudioThreadFinished());
-    for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
-        m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
+    for (auto& node : m_referencedNodes)
+        node->deref(AudioNode::RefTypeConnection);
 
     m_referencedNodes.clear();
 }
@@ -832,10 +834,8 @@ void AudioContext::handlePostRenderTasks()
 void AudioContext::handleDeferredFinishDerefs()
 {
     ASSERT(isAudioThread() && isGraphOwner());
-    for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
-        AudioNode* node = m_deferredFinishDerefList[i];
+    for (auto& node : m_deferredFinishDerefList)
         node->finishDeref(AudioNode::RefTypeConnection);
-    }
     
     m_deferredFinishDerefList.clear();
 }
@@ -870,24 +870,13 @@ void AudioContext::scheduleNodeDeletion()
 
         m_isDeletionScheduled = true;
 
-        // Don't let ourself get deleted before the callback.
-        // See matching deref() in deleteMarkedNodesDispatch().
-        ref();
-        callOnMainThread(deleteMarkedNodesDispatch, this);
+        RefPtr<AudioContext> strongThis(this);
+        callOnMainThread([strongThis] {
+            strongThis->deleteMarkedNodes();
+        });
     }
 }
 
-void AudioContext::deleteMarkedNodesDispatch(void* userData)
-{
-    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
-    ASSERT(context);
-    if (!context)
-        return;
-
-    context->deleteMarkedNodes();
-    context->deref();
-}
-
 void AudioContext::deleteMarkedNodes()
 {
     ASSERT(isMainThread());
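
Here the manual ref()/deref() pair and the deleteMarkedNodesDispatch() trampoline collapse into a lambda that captures a strong reference, which keeps the context alive until the queued task has run. The same pattern, with std::shared_ptr standing in for WTF::RefPtr:

    #include <functional>
    #include <memory>

    void callOnMainThread(std::function<void()>&&); // as in the sketch above

    struct ContextLike : std::enable_shared_from_this<ContextLike> {
        void deleteMarkedNodes() { /* ... */ }

        void scheduleNodeDeletion()
        {
            // The captured strongThis plays the role of the removed
            // ref()/deref() pair: the context cannot be destroyed
            // before the queued task has run.
            std::shared_ptr<ContextLike> strongThis = shared_from_this();
            callOnMainThread([strongThis] {
                strongThis->deleteMarkedNodes();
            });
        }
    };
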
@@ -897,9 +886,8 @@ void AudioContext::deleteMarkedNodes()
     {
         AutoLocker locker(*this);
 
-        while (size_t n = m_nodesToDelete.size()) {
-            AudioNode* node = m_nodesToDelete[n - 1];
-            m_nodesToDelete.removeLast();
+        while (m_nodesToDelete.size()) {
+            AudioNode* node = m_nodesToDelete.takeLast();
 
             // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
             unsigned numberOfInputs = node->numberOfInputs();
@@ -941,8 +929,8 @@ void AudioContext::handleDirtyAudioSummingJunctions()
 {
     ASSERT(isGraphOwner());    
 
-    for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
-        (*i)->updateRenderingState();
+    for (auto& junction : m_dirtySummingJunctions)
+        junction->updateRenderingState();
 
     m_dirtySummingJunctions.clear();
 }
@@ -951,8 +939,8 @@ void AudioContext::handleDirtyAudioNodeOutputs()
 {
     ASSERT(isGraphOwner());    
 
-    for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
-        (*i)->updateRenderingState();
+    for (auto& output : m_dirtyAudioNodeOutputs)
+        output->updateRenderingState();
 
     m_dirtyAudioNodeOutputs.clear();
 }
@@ -981,11 +969,9 @@ void AudioContext::updateAutomaticPullNodes()
         // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
 
-        unsigned j = 0;
-        for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
-            AudioNode* output = *i;
-            m_renderingAutomaticPullNodes[j] = output;
-        }
+        unsigned i = 0;
+        for (auto& output : m_automaticPullNodes)
+            m_renderingAutomaticPullNodes[i++] = output;
 
         m_automaticPullNodesNeedUpdating = false;
     }
@@ -995,8 +981,8 @@ void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
 {
     ASSERT(isAudioThread());
 
-    for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
-        m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
+    for (auto& node : m_renderingAutomaticPullNodes)
+        node->processIfNecessary(framesToProcess);
 }
 
 ScriptExecutionContext* AudioContext::scriptExecutionContext() const
@@ -1004,18 +990,64 @@ ScriptExecutionContext* AudioContext::scriptExecutionContext() const
     return m_isStopScheduled ? 0 : ActiveDOMObject::scriptExecutionContext();
 }
 
-void AudioContext::startRendering()
+void AudioContext::nodeWillBeginPlayback()
+{
+    // Called by scheduled AudioNodes when clients schedule their start times.
+    // Prior to the introduction of suspend(), resume(), and stop(), starting
+    // a scheduled AudioNode would remove the user-gesture restriction, if present,
+    // and would thus unmute the context. Now that AudioContext stays in the
+    // "suspended" state if a user-gesture restriction is present, starting a
+    // scheduled AudioNode should set the state to "running", but only if the
+    // user-gesture restriction is set.
+    if (userGestureRequiredForAudioStart())
+        startRendering();
+}
+
+bool AudioContext::willBeginPlayback()
 {
-    if (ScriptController::processingUserGesture())
+    if (userGestureRequiredForAudioStart()) {
+        if (!ScriptController::processingUserGestureForMedia())
+            return false;
         removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
+    }
 
     if (pageConsentRequiredForAudioStart()) {
         Page* page = document()->page();
-        if (page && !page->canStartMedia())
+        if (page && !page->canStartMedia()) {
             document()->addMediaCanStartListener(this);
-        else
-            removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+            return false;
+        }
+        removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+    }
+
+    return m_mediaSession->clientWillBeginPlayback();
+}
+
+bool AudioContext::willPausePlayback()
+{
+    if (userGestureRequiredForAudioStart()) {
+        if (!ScriptController::processingUserGestureForMedia())
+            return false;
+        removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
     }
+
+    if (pageConsentRequiredForAudioStart()) {
+        Page* page = document()->page();
+        if (page && !page->canStartMedia()) {
+            document()->addMediaCanStartListener(this);
+            return false;
+        }
+        removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+    }
+    
+    return m_mediaSession->clientWillPausePlayback();
+}
+
+void AudioContext::startRendering()
+{
+    if (!willBeginPlayback())
+        return;
+
     destination()->startRendering();
     setState(State::Running);
 }
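
startRendering() now defers to willBeginPlayback(), which clears each behavior restriction as it is satisfied and reports true only when the gesture check, the page-consent check, and the media session all allow playback. The gatekeeper shape, reduced to a sketch whose field and parameter names are illustrative, not WebCore's:

    // Each restriction either clears or vetoes; rendering starts only
    // once every check passes.
    struct PlaybackGate {
        bool userGestureRequired { true };
        bool pageConsentRequired { true };

        bool willBeginPlayback(bool handlingUserGesture, bool pageAllowsMedia)
        {
            if (userGestureRequired) {
                if (!handlingUserGesture)
                    return false; // stay suspended until a gesture arrives
                userGestureRequired = false;
            }
            if (pageConsentRequired) {
                if (!pageAllowsMedia)
                    return false; // wait for mediaCanStart()
                pageConsentRequired = false;
            }
            return true; // the media session may start rendering
        }
    };
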
@@ -1025,9 +1057,12 @@ void AudioContext::mediaCanStart()
     removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
 }
 
-bool AudioContext::isPlayingAudio()
+MediaProducer::MediaStateFlags AudioContext::mediaState() const
 {
-    return !m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio();
+    if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
+        return MediaProducer::IsPlayingAudio;
+
+    return MediaProducer::IsNotPlaying;
 }
 
 void AudioContext::pageMutedStateDidChange()
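
The boolean isPlayingAudio() becomes mediaState(), returning flags so a producer can report more than one state at once. The assumed shape of the bitmask:

    // Assumed shape of MediaProducer::MediaStateFlags. Only IsNotPlaying
    // and IsPlayingAudio appear in this diff; the video flag is a
    // hypothetical sibling, shown to motivate the bitmask.
    enum MediaStateFlags : unsigned {
        IsNotPlaying   = 0,
        IsPlayingAudio = 1 << 0,
        IsPlayingVideo = 1 << 1, // illustrative only
    };
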
@@ -1038,7 +1073,13 @@ void AudioContext::pageMutedStateDidChange()
 
 void AudioContext::isPlayingAudioDidChange()
 {
-    document()->updateIsPlayingAudio();
+    // Make sure to call Document::updateIsPlayingMedia() on the main thread, since
+    // we could be on the audio I/O thread here and the call into WebCore could block.
+    RefPtr<AudioContext> strongThis(this);
+    callOnMainThread([strongThis] {
+        if (strongThis->document())
+            strongThis->document()->updateIsPlayingMedia();
+    });
 }
 
 void AudioContext::fireCompletionEvent()
@@ -1071,86 +1112,84 @@ void AudioContext::decrementActiveSourceCount()
     --m_activeSourceCount;
 }
 
-void AudioContext::suspendContext(std::function<void()> successCallback, std::function<void()> failureCallback, ExceptionCode& ec)
+void AudioContext::suspend(Promise&& promise)
 {
-    ASSERT(successCallback);
-    ASSERT(failureCallback);
-
     if (isOfflineContext()) {
-        ec = INVALID_STATE_ERR;
+        promise.reject(INVALID_STATE_ERR);
         return;
     }
 
     if (m_state == State::Suspended) {
-        scriptExecutionContext()->postTask(successCallback);
+        promise.resolve(nullptr);
         return;
     }
 
     if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
-        scriptExecutionContext()->postTask(failureCallback);
+        promise.reject(0);
         return;
     }
 
-    addReaction(State::Suspended, successCallback);
+    addReaction(State::Suspended, WTFMove(promise));
 
-    if (!m_mediaSession->clientWillPausePlayback())
+    if (!willPausePlayback())
         return;
 
+    lazyInitialize();
+
     RefPtr<AudioContext> strongThis(this);
     m_destinationNode->suspend([strongThis] {
         strongThis->setState(State::Suspended);
     });
 }
 
-void AudioContext::resumeContext(std::function<void()> successCallback, std::function<void()> failureCallback, ExceptionCode& ec)
+void AudioContext::resume(Promise&& promise)
 {
-    ASSERT(successCallback);
-    ASSERT(failureCallback);
-
     if (isOfflineContext()) {
-        ec = INVALID_STATE_ERR;
+        promise.reject(INVALID_STATE_ERR);
         return;
     }
 
     if (m_state == State::Running) {
-        scriptExecutionContext()->postTask(successCallback);
+        promise.resolve(nullptr);
         return;
     }
 
     if (m_state == State::Closed || !m_destinationNode) {
-        scriptExecutionContext()->postTask(failureCallback);
+        promise.reject(0);
         return;
     }
 
-    addReaction(State::Running, successCallback);
+    addReaction(State::Running, WTFMove(promise));
 
-    if (!m_mediaSession->clientWillBeginPlayback())
+    if (!willBeginPlayback())
         return;
 
+    lazyInitialize();
+
     RefPtr<AudioContext> strongThis(this);
     m_destinationNode->resume([strongThis] {
         strongThis->setState(State::Running);
     });
 }
 
-void AudioContext::closeContext(std::function<void()> successCallback, std::function<void()>, ExceptionCode& ec)
+void AudioContext::close(Promise&& promise)
 {
-    ASSERT(successCallback);
-
     if (isOfflineContext()) {
-        ec = INVALID_STATE_ERR;
+        promise.reject(INVALID_STATE_ERR);
         return;
     }
 
     if (m_state == State::Closed || !m_destinationNode) {
-        scriptExecutionContext()->postTask(successCallback);
+        promise.resolve(nullptr);
         return;
     }
 
-    addReaction(State::Closed, successCallback);
+    addReaction(State::Closed, WTFMove(promise));
+
+    lazyInitialize();
 
     RefPtr<AudioContext> strongThis(this);
-    m_destinationNode->close([strongThis, successCallback] {
+    m_destinationNode->close([strongThis] {
         strongThis->setState(State::Closed);
         strongThis->uninitialize();
     });
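
suspend(), resume(), and close() replace the success/failure callback pairs (and the ExceptionCode out-parameter) with a single promise that every path must settle: reject for an offline context, resolve immediately when already in the target state, otherwise queue as a state reaction. The control flow of suspend(), sketched with a hypothetical two-callback promise stand-in:

    #include <functional>

    // Hypothetical stand-in for the generated DOM promise wrapper.
    struct PromiseLike {
        std::function<void()> resolve;
        std::function<void(int)> reject;
    };

    enum class State { Suspended, Running, Closed, Interrupted };

    // Shape of the new suspend(): every path settles the promise instead
    // of writing an ExceptionCode out-parameter or posting callbacks.
    void suspendSketch(bool isOfflineContext, State state, PromiseLike&& promise)
    {
        if (isOfflineContext) {
            promise.reject(11); // INVALID_STATE_ERR (InvalidStateError)
            return;
        }
        if (state == State::Suspended) {
            promise.resolve();
            return;
        }
        // ... otherwise queue the promise as a State::Suspended reaction
        // and ask the destination node to suspend, resolving the promise
        // when the state transition lands.
    }
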
@@ -1163,14 +1202,16 @@ void AudioContext::suspendPlayback()
         return;
 
     if (m_state == State::Suspended) {
-        if (m_mediaSession->state() == MediaSession::Interrupted)
+        if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
             setState(State::Interrupted);
         return;
     }
 
+    lazyInitialize();
+
     RefPtr<AudioContext> strongThis(this);
     m_destinationNode->suspend([strongThis] {
-        bool interrupted = strongThis->m_mediaSession->state() == MediaSession::Interrupted;
+        bool interrupted = strongThis->m_mediaSession->state() == PlatformMediaSession::Interrupted;
         strongThis->setState(interrupted ? State::Interrupted : State::Suspended);
     });
 }
@@ -1185,6 +1226,11 @@ void AudioContext::mayResumePlayback(bool shouldResume)
         return;
     }
 
+    if (!willBeginPlayback())
+        return;
+
+    lazyInitialize();
+
     RefPtr<AudioContext> strongThis(this);
     m_destinationNode->resume([strongThis] {
         strongThis->setState(State::Running);