Clean up BaseAudioContext now that legacy/prefixed WebAudio is gone
authorcdumez@apple.com <cdumez@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Sun, 16 May 2021 01:05:18 +0000 (01:05 +0000)
committercdumez@apple.com <cdumez@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Sun, 16 May 2021 01:05:18 +0000 (01:05 +0000)
https://bugs.webkit.org/show_bug.cgi?id=225843

Reviewed by Darin Adler.

The BaseAudioContext class hierarchy used to be a bit complicated when
we supported legacy WebAudio because OfflineAudioContext would subclass
BaseAudioContext directly, while WebKitOfflineAudioContext would
subclass BaseAudioContext via AudioContext. The class hierarchy is now
a lot simpler, BaseAudioContext is the base class and it has exactly
2 subclasses: OfflineAudioContext and AudioContext (for real-time
rendering). Now that the legacy WebAudio code is gone, this patch
cleans up BaseAudioContext and moves as much code as possible to its
subclasses (OfflineAudioContext & AudioContext).

* Modules/webaudio/AudioBuffer.cpp:
(WebCore::AudioBuffer::create):
* Modules/webaudio/AudioBufferSourceNode.cpp:
(WebCore::AudioBufferSourceNode::setBuffer):
* Modules/webaudio/AudioContext.cpp:
(WebCore::AudioContext::create):
(WebCore::AudioContext::AudioContext):
(WebCore::AudioContext::uninitialize):
(WebCore::AudioContext::lazyInitialize):
(WebCore::AudioContext::activeDOMObjectName const):
* Modules/webaudio/AudioContext.h:
(isType):
* Modules/webaudio/AudioContextState.h:
* Modules/webaudio/AudioNode.cpp:
(WebCore::AudioNode::setChannelCount):
* Modules/webaudio/AudioNodeOutput.cpp:
(WebCore::AudioNodeOutput::AudioNodeOutput):
(WebCore::AudioNodeOutput::setNumberOfChannels):
* Modules/webaudio/AudioWorkletNode.cpp:
(WebCore::AudioWorkletNode::create):
* Modules/webaudio/BaseAudioContext.cpp:
(WebCore::generateContextID):
(WebCore::BaseAudioContext::BaseAudioContext):
(WebCore::BaseAudioContext::lazyInitialize):
(WebCore::BaseAudioContext::uninitialize):
(WebCore::BaseAudioContext::stop):
(WebCore::BaseAudioContext::createScriptProcessor):
(WebCore::BaseAudioContext::derefFinishedSourceNodes):
(WebCore::BaseAudioContext::lockInternal):
(WebCore::BaseAudioContext::tryLock):
(WebCore::BaseAudioContext::unlock):
(WebCore::BaseAudioContext::handlePostRenderTasks):
(WebCore::BaseAudioContext::deleteMarkedNodes):
(WebCore::BaseAudioContext::updateAutomaticPullNodes):
(WebCore::BaseAudioContext::postTask):
(WebCore::BaseAudioContext::workletIsReady):
* Modules/webaudio/BaseAudioContext.h:
(WebCore::BaseAudioContext::isInitialized const):
(WebCore::BaseAudioContext::currentSampleFrame const):
(WebCore::BaseAudioContext::currentTime const):
(WebCore::BaseAudioContext::sampleRate const):
(WebCore::BaseAudioContext::listener):
(WebCore::BaseAudioContext::incrementConnectionCount):
(WebCore::BaseAudioContext::isAudioThread const):
(WebCore::BaseAudioContext::isAudioThreadFinished const):
(WebCore::BaseAudioContext::isGraphOwner const):
* Modules/webaudio/ChannelMergerNode.cpp:
(WebCore::ChannelMergerNode::create):
* Modules/webaudio/ChannelSplitterNode.cpp:
(WebCore::ChannelSplitterNode::create):
* Modules/webaudio/DefaultAudioDestinationNode.cpp:
(WebCore::DefaultAudioDestinationNode::DefaultAudioDestinationNode):
(WebCore::DefaultAudioDestinationNode::context):
(WebCore::DefaultAudioDestinationNode::context const):
* Modules/webaudio/DefaultAudioDestinationNode.h:
* Modules/webaudio/MediaElementAudioSourceNode.cpp:
(WebCore::MediaElementAudioSourceNode::setFormat):
* Modules/webaudio/MediaStreamAudioSourceNode.cpp:
(WebCore::MediaStreamAudioSourceNode::setFormat):
* Modules/webaudio/OfflineAudioContext.cpp:
(WebCore::OfflineAudioContext::OfflineAudioContext):
(WebCore::OfflineAudioContext::create):
(WebCore::OfflineAudioContext::uninitialize):
(WebCore::OfflineAudioContext::activeDOMObjectName const):
(WebCore::OfflineAudioContext::startRendering):
(WebCore::OfflineAudioContext::suspendRendering):
(WebCore::OfflineAudioContext::resumeRendering):
(WebCore::OfflineAudioContext::didSuspendRendering):
(WebCore::OfflineAudioContext::finishedRendering):
(WebCore::OfflineAudioContext::settleRenderingPromise):
(WebCore::OfflineAudioContext::dispatchEvent):
* Modules/webaudio/OfflineAudioContext.h:
(isType):
* Modules/webaudio/OfflineAudioContext.idl:
* Modules/webaudio/OfflineAudioDestinationNode.cpp:
(WebCore::OfflineAudioDestinationNode::OfflineAudioDestinationNode):
(WebCore::OfflineAudioDestinationNode::context):
(WebCore::OfflineAudioDestinationNode::context const):
* Modules/webaudio/OfflineAudioDestinationNode.h:
* Modules/webaudio/ScriptProcessorNode.cpp:
(WebCore::ScriptProcessorNode::ScriptProcessorNode):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@277553 268f45cc-cd09-0410-ab3c-d52691b4dbfc

23 files changed:
Source/WebCore/ChangeLog
Source/WebCore/Modules/webaudio/AudioBuffer.cpp
Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
Source/WebCore/Modules/webaudio/AudioContext.cpp
Source/WebCore/Modules/webaudio/AudioContext.h
Source/WebCore/Modules/webaudio/AudioContextState.h
Source/WebCore/Modules/webaudio/AudioNode.cpp
Source/WebCore/Modules/webaudio/AudioNodeOutput.cpp
Source/WebCore/Modules/webaudio/AudioWorkletNode.cpp
Source/WebCore/Modules/webaudio/BaseAudioContext.cpp
Source/WebCore/Modules/webaudio/BaseAudioContext.h
Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp
Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp
Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp
Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h
Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp
Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp
Source/WebCore/Modules/webaudio/OfflineAudioContext.cpp
Source/WebCore/Modules/webaudio/OfflineAudioContext.h
Source/WebCore/Modules/webaudio/OfflineAudioContext.idl
Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.cpp
Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h
Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp

index 7749d73..037b5d9 100644 (file)
@@ -1,3 +1,102 @@
+2021-05-15  Chris Dumez  <cdumez@apple.com>
+
+        Clean up BaseAudioContext now that legacy/prefixed WebAudio is gone
+        https://bugs.webkit.org/show_bug.cgi?id=225843
+
+        Reviewed by Darin Adler.
+
+        The BaseAudioContext class hierarchy used to be a bit complicated when
+        we supported legacy WebAudio because OfflineAudioContext would
+        BaseAudioContext directly, while WebKitOfflineAudioContext would
+        subclass BaseAudioContext via AudioContext. The class hierarchy is now
+        a lot simpler, BaseAudioContext is the base class and it has exactly
+        2 subclasses: OfflineAudioContext and AudioContext (for real-time
+        rendering). Now that the legacy WebAudio code is gone, this patch
+        cleans up BaseAudioContext and moves as much code as possible to its
+        subclasses (OfflineAudioContext & AudioContext).
+
+        * Modules/webaudio/AudioBuffer.cpp:
+        (WebCore::AudioBuffer::create):
+        * Modules/webaudio/AudioBufferSourceNode.cpp:
+        (WebCore::AudioBufferSourceNode::setBuffer):
+        * Modules/webaudio/AudioContext.cpp:
+        (WebCore::AudioContext::create):
+        (WebCore::AudioContext::AudioContext):
+        (WebCore::AudioContext::uninitialize):
+        (WebCore::AudioContext::lazyInitialize):
+        (WebCore::AudioContext::activeDOMObjectName const):
+        * Modules/webaudio/AudioContext.h:
+        (isType):
+        * Modules/webaudio/AudioContextState.h:
+        * Modules/webaudio/AudioNode.cpp:
+        (WebCore::AudioNode::setChannelCount):
+        * Modules/webaudio/AudioNodeOutput.cpp:
+        (WebCore::AudioNodeOutput::AudioNodeOutput):
+        (WebCore::AudioNodeOutput::setNumberOfChannels):
+        * Modules/webaudio/AudioWorkletNode.cpp:
+        (WebCore::AudioWorkletNode::create):
+        * Modules/webaudio/BaseAudioContext.cpp:
+        (WebCore::generateContextID):
+        (WebCore::BaseAudioContext::BaseAudioContext):
+        (WebCore::BaseAudioContext::lazyInitialize):
+        (WebCore::BaseAudioContext::uninitialize):
+        (WebCore::BaseAudioContext::stop):
+        (WebCore::BaseAudioContext::createScriptProcessor):
+        (WebCore::BaseAudioContext::derefFinishedSourceNodes):
+        (WebCore::BaseAudioContext::lockInternal):
+        (WebCore::BaseAudioContext::tryLock):
+        (WebCore::BaseAudioContext::unlock):
+        (WebCore::BaseAudioContext::handlePostRenderTasks):
+        (WebCore::BaseAudioContext::deleteMarkedNodes):
+        (WebCore::BaseAudioContext::updateAutomaticPullNodes):
+        (WebCore::BaseAudioContext::postTask):
+        (WebCore::BaseAudioContext::workletIsReady):
+        * Modules/webaudio/BaseAudioContext.h:
+        (WebCore::BaseAudioContext::isInitialized const):
+        (WebCore::BaseAudioContext::currentSampleFrame const):
+        (WebCore::BaseAudioContext::currentTime const):
+        (WebCore::BaseAudioContext::sampleRate const):
+        (WebCore::BaseAudioContext::listener):
+        (WebCore::BaseAudioContext::incrementConnectionCount):
+        (WebCore::BaseAudioContext::isAudioThread const):
+        (WebCore::BaseAudioContext::isAudioThreadFinished const):
+        (WebCore::BaseAudioContext::isGraphOwner const):
+        * Modules/webaudio/ChannelMergerNode.cpp:
+        (WebCore::ChannelMergerNode::create):
+        * Modules/webaudio/ChannelSplitterNode.cpp:
+        (WebCore::ChannelSplitterNode::create):
+        * Modules/webaudio/DefaultAudioDestinationNode.cpp:
+        (WebCore::DefaultAudioDestinationNode::DefaultAudioDestinationNode):
+        (WebCore::DefaultAudioDestinationNode::context):
+        (WebCore::DefaultAudioDestinationNode::context const):
+        * Modules/webaudio/DefaultAudioDestinationNode.h:
+        * Modules/webaudio/MediaElementAudioSourceNode.cpp:
+        (WebCore::MediaElementAudioSourceNode::setFormat):
+        * Modules/webaudio/MediaStreamAudioSourceNode.cpp:
+        (WebCore::MediaStreamAudioSourceNode::setFormat):
+        * Modules/webaudio/OfflineAudioContext.cpp:
+        (WebCore::OfflineAudioContext::OfflineAudioContext):
+        (WebCore::OfflineAudioContext::create):
+        (WebCore::OfflineAudioContext::uninitialize):
+        (WebCore::OfflineAudioContext::activeDOMObjectName const):
+        (WebCore::OfflineAudioContext::startRendering):
+        (WebCore::OfflineAudioContext::suspendRendering):
+        (WebCore::OfflineAudioContext::resumeRendering):
+        (WebCore::OfflineAudioContext::didSuspendRendering):
+        (WebCore::OfflineAudioContext::finishedRendering):
+        (WebCore::OfflineAudioContext::settleRenderingPromise):
+        (WebCore::OfflineAudioContext::dispatchEvent):
+        * Modules/webaudio/OfflineAudioContext.h:
+        (isType):
+        * Modules/webaudio/OfflineAudioContext.idl:
+        * Modules/webaudio/OfflineAudioDestinationNode.cpp:
+        (WebCore::OfflineAudioDestinationNode::OfflineAudioDestinationNode):
+        (WebCore::OfflineAudioDestinationNode::context):
+        (WebCore::OfflineAudioDestinationNode::context const):
+        * Modules/webaudio/OfflineAudioDestinationNode.h:
+        * Modules/webaudio/ScriptProcessorNode.cpp:
+        (WebCore::ScriptProcessorNode::ScriptProcessorNode):
+
 2021-05-15  Alan Bujtas  <zalan@apple.com>
 
         [LFC] Move inline formatting geometry to its own class
index e4e8e68..1579f81 100644 (file)
@@ -42,7 +42,7 @@ namespace WebCore {
 
 RefPtr<AudioBuffer> AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, LegacyPreventDetaching preventDetaching)
 {
-    if (!BaseAudioContext::isSupportedSampleRate(sampleRate) || !numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels() || !numberOfFrames)
+    if (!BaseAudioContext::isSupportedSampleRate(sampleRate) || !numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels || !numberOfFrames)
         return nullptr;
 
     auto buffer = adoptRef(*new AudioBuffer(numberOfChannels, numberOfFrames, sampleRate, preventDetaching));
@@ -57,7 +57,7 @@ ExceptionOr<Ref<AudioBuffer>> AudioBuffer::create(const AudioBufferOptions& opti
     if (!options.numberOfChannels)
         return Exception { NotSupportedError, "Number of channels cannot be 0."_s };
 
-    if (options.numberOfChannels > AudioContext::maxNumberOfChannels())
+    if (options.numberOfChannels > AudioContext::maxNumberOfChannels)
         return Exception { NotSupportedError, "Number of channels cannot be more than max supported."_s };
     
     if (!options.length)
index 48c1632..e0bd4ca 100644 (file)
@@ -426,7 +426,7 @@ ExceptionOr<void> AudioBufferSourceNode::setBuffer(RefPtr<AudioBuffer>&& buffer)
 
         // Do any necesssary re-configuration to the buffer's number of channels.
         unsigned numberOfChannels = buffer->numberOfChannels();
-        ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels());
+        ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels);
 
         output(0)->setNumberOfChannels(numberOfChannels);
 
index 7d56d0a..7776a51 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010 Google Inc. All rights reserved.
- * Copyright (C) 2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2021 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,6 +28,7 @@
 #if ENABLE(WEB_AUDIO)
 
 #include "AudioContext.h"
+#include "AudioContextOptions.h"
 #include "AudioTimestamp.h"
 #include "DOMWindow.h"
 #include "JSDOMPromiseDeferred.h"
@@ -63,6 +64,10 @@ constexpr unsigned maxHardwareContexts = 4;
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(AudioContext);
 
+#if OS(WINDOWS)
+static unsigned hardwareContextCount;
+#endif
+
 static Optional<float>& defaultSampleRateForTesting()
 {
     static Optional<float> sampleRate;
@@ -85,7 +90,7 @@ ExceptionOr<Ref<AudioContext>> AudioContext::create(Document& document, AudioCon
 {
     ASSERT(isMainThread());
 #if OS(WINDOWS)
-    if (s_hardwareContextCount >= maxHardwareContexts)
+    if (hardwareContextCount >= maxHardwareContexts)
         return Exception { QuotaExceededError, "Reached maximum number of hardware contexts on this platform"_s };
 #endif
     
@@ -105,11 +110,15 @@ ExceptionOr<Ref<AudioContext>> AudioContext::create(Document& document, AudioCon
     return audioContext;
 }
 
-// Constructor for rendering to the audio hardware.
 AudioContext::AudioContext(Document& document, const AudioContextOptions& contextOptions)
-    : BaseAudioContext(document, contextOptions)
+    : BaseAudioContext(document)
+    , m_destinationNode(makeUniqueRef<DefaultAudioDestinationNode>(*this, contextOptions.sampleRate))
     , m_mediaSession(PlatformMediaSession::create(PlatformMediaSessionManager::sharedManager(), *this))
 {
+    // According to spec AudioContext must die only after page navigate.
+    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
+    setPendingActivity();
+
     constructCommon();
 
     // Initialize the destination node's muted state to match the page's current muted state.
@@ -117,6 +126,14 @@ AudioContext::AudioContext(Document& document, const AudioContextOptions& contex
 
     document.addAudioProducer(*this);
     document.registerForVisibilityStateChangedCallbacks(*this);
+
+    // Unlike OfflineAudioContext, AudioContext does not require calling resume() to start rendering.
+    // Lazy initialization starts rendering so we schedule a task here to make sure lazy initialization
+    // ends up happening, even if no audio node gets constructed.
+    postTask([this] {
+        if (!isStopped())
+            lazyInitialize();
+    });
 }
 
 void AudioContext::constructCommon()
@@ -140,6 +157,21 @@ AudioContext::~AudioContext()
     }
 }
 
+void AudioContext::uninitialize()
+{
+    if (!isInitialized())
+        return;
+
+    BaseAudioContext::uninitialize();
+
+#if OS(WINDOWS)
+    ASSERT(hardwareContextCount);
+    --hardwareContextCount;
+#endif
+
+    setState(State::Closed);
+}
+
 double AudioContext::baseLatency()
 {
     lazyInitialize();
@@ -186,16 +218,6 @@ void AudioContext::close(DOMPromiseDeferred<void>&& promise)
     });
 }
 
-DefaultAudioDestinationNode& AudioContext::destination()
-{
-    return static_cast<DefaultAudioDestinationNode&>(BaseAudioContext::destination());
-}
-
-const DefaultAudioDestinationNode& AudioContext::destination() const
-{
-    return static_cast<const DefaultAudioDestinationNode&>(BaseAudioContext::destination());
-}
-
 void AudioContext::suspendRendering(DOMPromiseDeferred<void>&& promise)
 {
     if (isOfflineContext()) {
@@ -309,7 +331,9 @@ void AudioContext::lazyInitialize()
             // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
             // We may want to consider requiring it for symmetry with OfflineAudioContext.
             startRendering();
-            ++s_hardwareContextCount;
+#if OS(WINDOWS)
+            ++hardwareContextCount;
+#endif
         }
     }
 }
@@ -435,6 +459,11 @@ void AudioContext::resume()
     document()->updateIsPlayingMedia();
 }
 
+const char* AudioContext::activeDOMObjectName() const
+{
+    return "AudioContext";
+}
+
 void AudioContext::suspendPlayback()
 {
     if (state() == State::Closed || !isInitialized())
index 1664567..a6ef67c 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010 Google Inc. All rights reserved.
- * Copyright (C) 2016-2020 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2021 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 
 #pragma once
 
-#include "AudioContextOptions.h"
 #include "BaseAudioContext.h"
 #include "DefaultAudioDestinationNode.h"
 #include "MediaCanStartListener.h"
 #include "MediaProducer.h"
 #include "PlatformMediaSession.h"
 #include "VisibilityChangeClient.h"
+#include <wtf/UniqueRef.h>
 
 namespace WebCore {
 
 class DOMWindow;
+class HTMLMediaElement;
+class MediaStream;
+class MediaStreamAudioDestinationNode;
+class MediaStreamAudioSourceNode;
 
+struct AudioContextOptions;
 struct AudioTimestamp;
 
 class AudioContext final
@@ -48,15 +53,16 @@ class AudioContext final
     WTF_MAKE_ISO_ALLOCATED(AudioContext);
 public:
     // Create an AudioContext for rendering to the audio hardware.
-    static ExceptionOr<Ref<AudioContext>> create(Document&, AudioContextOptions&& = { });
+    static ExceptionOr<Ref<AudioContext>> create(Document&, AudioContextOptions&&);
     ~AudioContext();
 
     WEBCORE_EXPORT static void setDefaultSampleRateForTesting(Optional<float>);
 
     void close(DOMPromiseDeferred<void>&&);
 
-    DefaultAudioDestinationNode& destination();
-    const DefaultAudioDestinationNode& destination() const;
+    DefaultAudioDestinationNode& destination() final { return m_destinationNode.get(); }
+    const DefaultAudioDestinationNode& destination() const final { return m_destinationNode.get(); }
+
     double baseLatency();
 
     AudioTimestamp getOutputTimestamp(DOMWindow&);
@@ -88,8 +94,8 @@ public:
     void addBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions |= restriction; }
     void removeBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions &= ~restriction; }
 
-protected:
-    explicit AudioContext(Document&, const AudioContextOptions& = { });
+private:
+    AudioContext(Document&, const AudioContextOptions&);
 
     bool willBeginPlayback();
 
@@ -97,7 +103,6 @@ protected:
     const Logger& logger() const final;
 #endif
 
-private:
     void constructCommon();
 
     bool userGestureRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequireUserGestureForAudioStartRestriction; }
@@ -105,6 +110,9 @@ private:
 
     bool willPausePlayback();
 
+    void uninitialize() final;
+    bool isOfflineContext() const final { return false; }
+
     // MediaProducer
     MediaProducer::MediaStateFlags mediaState() const final;
     void pageMutedStateDidChange() final;
@@ -129,9 +137,11 @@ private:
     void visibilityStateChanged() final;
 
     // ActiveDOMObject
+    const char* activeDOMObjectName() const final;
     void suspend(ReasonForSuspension) final;
     void resume() final;
 
+    UniqueRef<DefaultAudioDestinationNode> m_destinationNode;
     std::unique_ptr<PlatformMediaSession> m_mediaSession;
 
     BehaviorRestrictions m_restrictions { NoRestrictions };
@@ -142,3 +152,7 @@ private:
 };
 
 } // WebCore
+
+SPECIALIZE_TYPE_TRAITS_BEGIN(WebCore::AudioContext)
+    static bool isType(const WebCore::BaseAudioContext& context) { return !context.isOfflineContext(); }
+SPECIALIZE_TYPE_TRAITS_END()
index 1601311..7c37876 100644 (file)
@@ -27,6 +27,6 @@
 
 namespace WebCore {
 
-enum class AudioContextState { Suspended, Running, Interrupted, Closed };
+enum class AudioContextState : uint8_t { Suspended, Running, Interrupted, Closed };
 
 }
index 216f6b7..b373d50 100644 (file)
@@ -387,7 +387,7 @@ ExceptionOr<void> AudioNode::setChannelCount(unsigned channelCount)
     if (!channelCount)
         return Exception { NotSupportedError, "Channel count cannot be 0"_s };
     
-    if (channelCount > AudioContext::maxNumberOfChannels())
+    if (channelCount > AudioContext::maxNumberOfChannels)
         return Exception { IndexSizeError, "Channel count exceeds maximum limit"_s };
 
     if (m_channelCount == channelCount)
index 8ec8cd1..e76ce8f 100644 (file)
@@ -42,14 +42,14 @@ AudioNodeOutput::AudioNodeOutput(AudioNode* node, unsigned numberOfChannels)
     , m_numberOfChannels(numberOfChannels)
     , m_desiredNumberOfChannels(numberOfChannels)
 {
-    ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels());
+    ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels);
 
     m_internalBus = AudioBus::create(numberOfChannels, AudioUtilities::renderQuantumSize);
 }
 
 void AudioNodeOutput::setNumberOfChannels(unsigned numberOfChannels)
 {
-    ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels());
+    ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels);
     ASSERT(context().isGraphOwner());
 
     m_desiredNumberOfChannels = numberOfChannels;
index 5922682..9c41d13 100644 (file)
@@ -68,7 +68,7 @@ ExceptionOr<Ref<AudioWorkletNode>> AudioWorkletNode::create(JSC::JSGlobalObject&
             return Exception { IndexSizeError, "Length of specified outputChannelCount does not match the given number of outputs"_s };
 
         for (auto& channelCount : *options.outputChannelCount) {
-            if (channelCount < 1 || channelCount > AudioContext::maxNumberOfChannels())
+            if (channelCount < 1 || channelCount > AudioContext::maxNumberOfChannels)
                 return Exception { NotSupportedError, "Provided number of channels for output is outside supported range"_s };
         }
     }
index e1f4b04..0625f21 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010 Google Inc. All rights reserved.
- * Copyright (C) 2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2021 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -50,7 +50,6 @@
 #include "ConstantSourceNode.h"
 #include "ConstantSourceOptions.h"
 #include "ConvolverNode.h"
-#include "DefaultAudioDestinationNode.h"
 #include "DelayNode.h"
 #include "DelayOptions.h"
 #include "Document.h"
@@ -68,8 +67,6 @@
 #include "JSDOMPromiseDeferred.h"
 #include "Logging.h"
 #include "NetworkingContext.h"
-#include "OfflineAudioCompletionEvent.h"
-#include "OfflineAudioDestinationNode.h"
 #include "OscillatorNode.h"
 #include "Page.h"
 #include "PannerNode.h"
 #include "StereoPannerNode.h"
 #include "StereoPannerOptions.h"
 #include "WaveShaperNode.h"
+#include <JavaScriptCore/ArrayBuffer.h>
 #include <JavaScriptCore/ScriptCallStack.h>
+#include <wtf/Atomics.h>
+#include <wtf/IsoMallocInlines.h>
+#include <wtf/MainThread.h>
+#include <wtf/Ref.h>
 #include <wtf/Scope.h>
+#include <wtf/text/WTFString.h>
 
 #if DEBUG_AUDIONODE_REFERENCES
 #include <stdio.h>
 #include "GStreamerCommon.h"
 #endif
 
-#if PLATFORM(IOS_FAMILY)
-#include "ScriptController.h"
-#include "Settings.h"
-#endif
-
-#include <JavaScriptCore/ArrayBuffer.h>
-#include <wtf/Atomics.h>
-#include <wtf/IsoMallocInlines.h>
-#include <wtf/MainThread.h>
-#include <wtf/Ref.h>
-#include <wtf/RefCounted.h>
-#include <wtf/Scope.h>
-#include <wtf/text/WTFString.h>
-
 namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(BaseAudioContext);
@@ -114,9 +103,7 @@ bool BaseAudioContext::isSupportedSampleRate(float sampleRate)
     return sampleRate >= 3000 && sampleRate <= 384000;
 }
 
-unsigned BaseAudioContext::s_hardwareContextCount;
-
-static uint64_t generateAudioContextID()
+static uint64_t generateContextID()
 {
     ASSERT(isMainThread());
     static uint64_t contextIDSeed = 0;
@@ -130,52 +117,18 @@ static HashSet<uint64_t>& liveAudioContexts()
     return contexts;
 }
 
-// Constructor for rendering to the audio hardware.
-BaseAudioContext::BaseAudioContext(Document& document, const AudioContextOptions& contextOptions)
+BaseAudioContext::BaseAudioContext(Document& document)
     : ActiveDOMObject(document)
 #if !RELEASE_LOG_DISABLED
     , m_logger(document.logger())
     , m_logIdentifier(uniqueLogIdentifier())
 #endif
-    , m_contextID(generateAudioContextID())
+    , m_contextID(generateContextID())
     , m_worklet(AudioWorklet::create(*this))
-    , m_destinationNode(makeUniqueRef<DefaultAudioDestinationNode>(*this, contextOptions.sampleRate))
     , m_listener(AudioListener::create(*this))
 {
     liveAudioContexts().add(m_contextID);
 
-    // According to spec AudioContext must die only after page navigate.
-    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
-    setPendingActivity();
-
-    FFTFrame::initialize();
-
-    // Unlike OfflineAudioContext, AudioContext does not require calling resume() to start rendering.
-    // Lazy initialization starts rendering so we schedule a task here to make sure lazy initialization
-    // ends up happening, even if no audio node gets constructed.
-    postTask([this] {
-        if (m_isStopScheduled)
-            return;
-
-        lazyInitialize();
-    });
-}
-
-// Constructor for offline (non-realtime) rendering.
-BaseAudioContext::BaseAudioContext(Document& document, unsigned numberOfChannels, float sampleRate, RefPtr<AudioBuffer>&& renderTarget)
-    : ActiveDOMObject(document)
-#if !RELEASE_LOG_DISABLED
-    , m_logger(document.logger())
-    , m_logIdentifier(uniqueLogIdentifier())
-#endif
-    , m_contextID(generateAudioContextID())
-    , m_worklet(AudioWorklet::create(*this))
-    , m_isOfflineContext(true)
-    , m_renderTarget(WTFMove(renderTarget))
-    , m_destinationNode(makeUniqueRef<OfflineAudioDestinationNode>(*this, numberOfChannels, sampleRate, m_renderTarget.copyRef()))
-    , m_listener(AudioListener::create(*this))
-{
-    liveAudioContexts().add(m_contextID);
     FFTFrame::initialize();
 }
 
@@ -212,7 +165,7 @@ void BaseAudioContext::lazyInitialize()
     if (m_isAudioThreadFinished)
         return;
 
-    m_destinationNode->initialize();
+    destination().initialize();
 
     m_isInitialized = true;
 }
@@ -240,19 +193,11 @@ void BaseAudioContext::uninitialize()
         return;
 
     // This stops the audio thread and all audio rendering.
-    m_destinationNode->uninitialize();
+    destination().uninitialize();
 
     // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
     m_isAudioThreadFinished = true;
 
-    if (!isOfflineContext()) {
-        ASSERT(s_hardwareContextCount);
-        --s_hardwareContextCount;
-
-        // Offline contexts move to 'Closed' state when dispatching the completion event.
-        setState(State::Closed);
-    }
-
     {
         AutoLocker locker(*this);
         // This should have been called from handlePostRenderTasks() at the end of rendering.
@@ -268,11 +213,6 @@ void BaseAudioContext::uninitialize()
     m_isInitialized = false;
 }
 
-bool BaseAudioContext::isInitialized() const
-{
-    return m_isInitialized;
-}
-
 void BaseAudioContext::addReaction(State state, DOMPromiseDeferred<void>&& promise)
 {
     size_t stateIndex = static_cast<size_t>(state);
@@ -302,14 +242,15 @@ void BaseAudioContext::setState(State state)
 
 void BaseAudioContext::stop()
 {
+    ASSERT(isMainThread());
     ALWAYS_LOG(LOGIDENTIFIER);
     
-    ASSERT(isMainThread());
-    auto protectedThis = makeRef(*this);
-
     // Usually ScriptExecutionContext calls stop twice.
     if (m_isStopScheduled)
         return;
+
+    auto protectedThis = makeRef(*this);
+
     m_isStopScheduled = true;
 
     ASSERT(document());
@@ -319,21 +260,11 @@ void BaseAudioContext::stop()
     clear();
 }
 
-const char* BaseAudioContext::activeDOMObjectName() const
-{
-    return "AudioContext";
-}
-
 Document* BaseAudioContext::document() const
 {
     return downcast<Document>(m_scriptExecutionContext);
 }
 
-float BaseAudioContext::sampleRate() const
-{
-    return m_destinationNode->sampleRate();
-}
-
 bool BaseAudioContext::wouldTaintOrigin(const URL& url) const
 {
     if (url.protocolIsData())
@@ -378,11 +309,6 @@ void BaseAudioContext::decodeAudioData(Ref<ArrayBuffer>&& audioData, RefPtr<Audi
     });
 }
 
-AudioListener& WebCore::BaseAudioContext::listener()
-{
-    return m_listener;
-}
-
 ExceptionOr<Ref<AudioBufferSourceNode>> BaseAudioContext::createBufferSource()
 {
     ALWAYS_LOG(LOGIDENTIFIER);
@@ -435,13 +361,13 @@ ExceptionOr<Ref<ScriptProcessorNode>> BaseAudioContext::createScriptProcessor(si
     // This parameter [numberOfInputChannels] determines the number of channels for this node's input. Values of
     // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.
 
-    if (numberOfInputChannels > maxNumberOfChannels())
+    if (numberOfInputChannels > maxNumberOfChannels)
         return Exception { NotSupportedError, "numberOfInputChannels exceeds maximum number of channels"_s };
 
     // This parameter [numberOfOutputChannels] determines the number of channels for this node's output. Values of
     // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.
 
-    if (numberOfOutputChannels > maxNumberOfChannels())
+    if (numberOfOutputChannels > maxNumberOfChannels)
         return Exception { NotSupportedError, "numberOfOutputChannels exceeds maximum number of channels"_s };
 
     return ScriptProcessorNode::create(*this, bufferSize, numberOfInputChannels, numberOfOutputChannels);
@@ -583,11 +509,6 @@ ExceptionOr<Ref<IIRFilterNode>> BaseAudioContext::createIIRFilter(ScriptExecutio
     return IIRFilterNode::create(scriptExecutionContext, *this, WTFMove(options));
 }
 
-static bool isFinishedSourceNode(const AudioConnectionRefPtr<AudioNode>& node)
-{
-    return node->isFinishedSourceNode();
-}
-
 void BaseAudioContext::derefFinishedSourceNodes()
 {
     ASSERT(isGraphOwner());
@@ -596,7 +517,7 @@ void BaseAudioContext::derefFinishedSourceNodes()
     if (!m_hasFinishedAudioSourceNodes)
         return;
 
-    m_referencedSourceNodes.removeAllMatching(isFinishedSourceNode);
+    m_referencedSourceNodes.removeAllMatching([](auto& node) { return node->isFinishedSourceNode(); });
     m_hasFinishedAudioSourceNodes = false;
 }
 
@@ -641,7 +562,7 @@ void BaseAudioContext::lockInternal(bool& mustReleaseLock)
         mustReleaseLock = false;
     } else {
         // Acquire the lock.
-        m_contextGraphMutex.lock();
+        m_contextGraphLock.lock();
         m_graphOwnerThread = &thisThread;
         mustReleaseLock = true;
     }
@@ -649,13 +570,10 @@ void BaseAudioContext::lockInternal(bool& mustReleaseLock)
 
 bool BaseAudioContext::tryLock(bool& mustReleaseLock)
 {
-    Thread& thisThread = Thread::current();
-    bool isAudioThread = &thisThread == audioThread();
-
     // Try to catch cases of using try lock on main thread - it should use regular lock.
-    ASSERT(isAudioThread || isAudioThreadFinished());
+    ASSERT(isAudioThread() || isAudioThreadFinished());
     
-    if (!isAudioThread) {
+    if (!isAudioThread()) {
         // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
         lock(mustReleaseLock);
         return true;
@@ -663,16 +581,16 @@ bool BaseAudioContext::tryLock(bool& mustReleaseLock)
     
     bool hasLock;
     
-    if (&thisThread == m_graphOwnerThread) {
+    if (isGraphOwner()) {
         // Thread already has the lock.
         hasLock = true;
         mustReleaseLock = false;
     } else {
         // Don't already have the lock - try to acquire it.
-        hasLock = m_contextGraphMutex.tryLock();
+        hasLock = m_contextGraphLock.tryLock();
         
         if (hasLock)
-            m_graphOwnerThread = &thisThread;
+            m_graphOwnerThread = &Thread::current();
 
         mustReleaseLock = hasLock;
     }
@@ -685,17 +603,7 @@ void BaseAudioContext::unlock()
     ASSERT(m_graphOwnerThread == &Thread::current());
 
     m_graphOwnerThread = nullptr;
-    m_contextGraphMutex.unlock();
-}
-
-bool BaseAudioContext::isAudioThread() const
-{
-    return m_audioThread == &Thread::current();
-}
-
-bool BaseAudioContext::isGraphOwner() const
-{
-    return m_graphOwnerThread == &Thread::current();
+    m_contextGraphLock.unlock();
 }
 
 void BaseAudioContext::addDeferredDecrementConnectionCount(AudioNode* node)
@@ -739,26 +647,27 @@ void BaseAudioContext::handlePostRenderTasks()
     // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
     // from the render graph (in which case they'll render silence).
     bool mustReleaseLock;
-    if (tryLock(mustReleaseLock)) {
-        // Take care of finishing any derefs where the tryLock() failed previously.
-        handleDeferredDecrementConnectionCounts();
+    if (!tryLock(mustReleaseLock))
+        return;
 
-        // Dynamically clean up nodes which are no longer needed.
-        derefFinishedSourceNodes();
+    // Take care of finishing any derefs where the tryLock() failed previously.
+    handleDeferredDecrementConnectionCounts();
 
-        // Don't delete in the real-time thread. Let the main thread do it.
-        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
-        scheduleNodeDeletion();
+    // Dynamically clean up nodes which are no longer needed.
+    derefFinishedSourceNodes();
 
-        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
-        handleDirtyAudioSummingJunctions();
-        handleDirtyAudioNodeOutputs();
+    // Don't delete in the real-time thread. Let the main thread do it.
+    // Ref-counted objects held by certain AudioNodes may not be thread-safe.
+    scheduleNodeDeletion();
 
-        updateAutomaticPullNodes();
+    // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
+    handleDirtyAudioSummingJunctions();
+    handleDirtyAudioNodeOutputs();
 
-        if (mustReleaseLock)
-            unlock();
-    }
+    updateAutomaticPullNodes();
+
+    if (mustReleaseLock)
+        unlock();
 }
 
 void BaseAudioContext::handleDeferredDecrementConnectionCounts()
@@ -820,29 +729,28 @@ void BaseAudioContext::deleteMarkedNodes()
 
     // Protect this object from being deleted before we release the mutex locked by AutoLocker.
     auto protectedThis = makeRef(*this);
-    {
-        AutoLocker locker(*this);
 
-        while (m_nodesToDelete.size()) {
-            AudioNode* node = m_nodesToDelete.takeLast();
+    AutoLocker locker(*this);
+
+    while (m_nodesToDelete.size()) {
+        AudioNode* node = m_nodesToDelete.takeLast();
 
-            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
-            unsigned numberOfInputs = node->numberOfInputs();
-            for (unsigned i = 0; i < numberOfInputs; ++i)
-                m_dirtySummingJunctions.remove(node->input(i));
+        // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
+        unsigned numberOfInputs = node->numberOfInputs();
+        for (unsigned i = 0; i < numberOfInputs; ++i)
+            m_dirtySummingJunctions.remove(node->input(i));
 
-            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
-            unsigned numberOfOutputs = node->numberOfOutputs();
-            for (unsigned i = 0; i < numberOfOutputs; ++i)
-                m_dirtyAudioNodeOutputs.remove(node->output(i));
+        // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
+        unsigned numberOfOutputs = node->numberOfOutputs();
+        for (unsigned i = 0; i < numberOfOutputs; ++i)
+            m_dirtyAudioNodeOutputs.remove(node->output(i));
 
-            ASSERT_WITH_MESSAGE(node->nodeType() != AudioNode::NodeTypeDestination, "Destination node is owned by the BaseAudioContext");
+        ASSERT_WITH_MESSAGE(node->nodeType() != AudioNode::NodeTypeDestination, "Destination node is owned by the BaseAudioContext");
 
-            // Finally, delete it.
-            delete node;
-        }
-        m_isDeletionScheduled = false;
+        // Finally, delete it.
+        delete node;
     }
+    m_isDeletionScheduled = false;
 }
 
 void BaseAudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
@@ -915,20 +823,21 @@ void BaseAudioContext::updateAutomaticPullNodes()
 {
     ASSERT(isGraphOwner());
 
-    if (m_automaticPullNodesNeedUpdating) {
-        // Heap allocations are forbidden on the audio thread for performance reasons so we need to
-        // explicitly allow the following allocation(s).
-        DisableMallocRestrictionsForCurrentThreadScope disableMallocRestrictions;
+    if (!m_automaticPullNodesNeedUpdating)
+        return;
 
-        // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
-        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
+    // Heap allocations are forbidden on the audio thread for performance reasons so we need to
+    // explicitly allow the following allocation(s).
+    DisableMallocRestrictionsForCurrentThreadScope disableMallocRestrictions;
 
-        unsigned i = 0;
-        for (auto& output : m_automaticPullNodes)
-            m_renderingAutomaticPullNodes[i++] = output;
+    // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
+    m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
 
-        m_automaticPullNodesNeedUpdating = false;
-    }
+    unsigned i = 0;
+    for (auto& output : m_automaticPullNodes)
+        m_renderingAutomaticPullNodes[i++] = output;
+
+    m_automaticPullNodesNeedUpdating = false;
 }
 
 void BaseAudioContext::processAutomaticPullNodes(size_t framesToProcess)
@@ -944,6 +853,7 @@ ScriptExecutionContext* BaseAudioContext::scriptExecutionContext() const
     return ActiveDOMObject::scriptExecutionContext();
 }
 
+// FIXME: This should probably move to AudioContext.
 void BaseAudioContext::isPlayingAudioDidChange()
 {
     // Heap allocations are forbidden on the audio thread for performance reasons so we need to
@@ -958,53 +868,6 @@ void BaseAudioContext::isPlayingAudioDidChange()
     });
 }
 
-// FIXME: Move to OfflineAudioContext once WebKitOfflineAudioContext gets removed.
-void BaseAudioContext::finishedRendering(bool didRendering)
-{
-    ASSERT(isOfflineContext());
-    ASSERT(isMainThread());
-    auto finishedRenderingScope = WTF::makeScopeExit([this] {
-        didFinishOfflineRendering(Exception { InvalidStateError, "Offline rendering failed"_s });
-    });
-
-    if (!isMainThread())
-        return;
-
-    auto clearPendingActivityIfExitEarly = WTF::makeScopeExit([this] {
-        clearPendingActivity();
-    });
-
-
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    if (!didRendering)
-        return;
-
-    RefPtr<AudioBuffer> renderedBuffer = m_renderTarget.get();
-    setState(State::Closed);
-
-    ASSERT(renderedBuffer);
-    if (!renderedBuffer)
-        return;
-
-    // Avoid firing the event if the document has already gone away.
-    if (m_isStopScheduled)
-        return;
-
-    clearPendingActivityIfExitEarly.release();
-    queueTaskToDispatchEvent(*this, TaskSource::MediaElement, OfflineAudioCompletionEvent::create(*renderedBuffer));
-
-    finishedRenderingScope.release();
-    didFinishOfflineRendering(renderedBuffer.releaseNonNull());
-}
-
-void BaseAudioContext::dispatchEvent(Event& event)
-{
-    EventTarget::dispatchEvent(event);
-    if (event.eventInterface() == OfflineAudioCompletionEventInterfaceType)
-        clearPendingActivity();
-}
-
 void BaseAudioContext::incrementActiveSourceCount()
 {
     ++m_activeSourceCount;
@@ -1015,18 +878,11 @@ void BaseAudioContext::decrementActiveSourceCount()
     --m_activeSourceCount;
 }
 
-void BaseAudioContext::didSuspendRendering(size_t)
-{
-    setState(State::Suspended);
-}
-
-void BaseAudioContext::postTask(WTF::Function<void()>&& task)
+void BaseAudioContext::postTask(Function<void()>&& task)
 {
     ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return;
-
-    queueTaskKeepingObjectAlive(*this, TaskSource::MediaElement, WTFMove(task));
+    if (!m_isStopScheduled)
+        queueTaskKeepingObjectAlive(*this, TaskSource::MediaElement, WTFMove(task));
 }
 
 const SecurityOrigin* BaseAudioContext::origin() const
@@ -1104,7 +960,7 @@ void BaseAudioContext::workletIsReady()
 
     // If we're already rendering when the worklet becomes ready, we need to restart
     // rendering in order to switch to the audio worklet thread.
-    m_destinationNode->restartRendering();
+    destination().restartRendering();
 }
 
 #if !RELEASE_LOG_DISABLED
index ae0f491..56306fa 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010 Google Inc. All rights reserved.
- * Copyright (C) 2016-2020 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2021 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 
 #if ENABLE(WEB_AUDIO)
 #include "ActiveDOMObject.h"
-#include "AsyncAudioDecoder.h"
-#include "AudioBus.h"
-#include "AudioContextOptions.h"
 #include "AudioContextState.h"
 #include "AudioDestinationNode.h"
 #include "EventTarget.h"
 #include "JSDOMPromiseDeferred.h"
 #include "OscillatorType.h"
 #include "PeriodicWaveConstraints.h"
-#include "ScriptExecutionContext.h"
-#include <JavaScriptCore/ConsoleTypes.h>
-#include <JavaScriptCore/Float32Array.h>
 #include <atomic>
-#include <wtf/HashSet.h>
+#include <wtf/Forward.h>
 #include <wtf/LoggerHelper.h>
 #include <wtf/MainThread.h>
-#include <wtf/RefPtr.h>
 #include <wtf/ThreadSafeRefCounted.h>
 #include <wtf/Threading.h>
-#include <wtf/UniqueRef.h>
-#include <wtf/Vector.h>
-#include <wtf/WeakPtr.h>
-#include <wtf/text/AtomStringHash.h>
 
 namespace WebCore {
 
 class AnalyserNode;
+class AsyncAudioDecoder;
 class AudioBuffer;
 class AudioBufferCallback;
 class AudioBufferSourceNode;
 class AudioListener;
+class AudioNodeOutput;
 class AudioSummingJunction;
 class AudioWorklet;
 class BiquadFilterNode;
@@ -69,21 +60,17 @@ class DelayNode;
 class Document;
 class DynamicsCompressorNode;
 class GainNode;
-class HTMLMediaElement;
 class IIRFilterNode;
 class MediaElementAudioSourceNode;
-class MediaStream;
-class MediaStreamAudioDestinationNode;
-class MediaStreamAudioSourceNode;
 class OscillatorNode;
 class PannerNode;
 class PeriodicWave;
-class ScriptExecutionContext;
 class ScriptProcessorNode;
 class SecurityOrigin;
 class StereoPannerNode;
 class WaveShaperNode;
 
+struct AudioIOPosition;
 struct AudioParamDescriptor;
 
 template<typename IDLType> class DOMPromiseDeferred;
@@ -112,33 +99,24 @@ public:
     uint64_t contextID() const { return m_contextID; }
 
     Document* document() const;
-    bool isInitialized() const;
+    bool isInitialized() const { return m_isInitialized; }
     
-    bool isOfflineContext() const { return m_isOfflineContext; }
+    virtual bool isOfflineContext() const = 0;
+    virtual AudioDestinationNode& destination() = 0;
+    virtual const AudioDestinationNode& destination() const = 0;
 
-    AudioDestinationNode& destination() { return m_destinationNode.get(); }
-    const AudioDestinationNode& destination() const { return m_destinationNode.get(); }
-
-    size_t currentSampleFrame() const { return m_destinationNode->currentSampleFrame(); }
-    double currentTime() const { return m_destinationNode->currentTime(); }
-    float sampleRate() const;
+    size_t currentSampleFrame() const { return destination().currentSampleFrame(); }
+    double currentTime() const { return destination().currentTime(); }
+    float sampleRate() const { return destination().sampleRate(); }
     unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }
 
     void incrementActiveSourceCount();
     void decrementActiveSourceCount();
 
-    virtual bool shouldSuspend() { return false; }
-    
-    ExceptionOr<Ref<AudioBuffer>> createBuffer(unsigned numberOfChannels, unsigned length, float sampleRate);
-
     // Asynchronous audio file data decoding.
     void decodeAudioData(Ref<ArrayBuffer>&&, RefPtr<AudioBufferCallback>&&, RefPtr<AudioBufferCallback>&&, Optional<Ref<DeferredPromise>>&& = WTF::nullopt);
 
-    AudioListener& listener();
-
-    virtual void didSuspendRendering(size_t frame);
-
-    AudioBuffer* renderTarget() const { return m_renderTarget.get(); }
+    AudioListener& listener() { return m_listener; }
 
     using State = AudioContextState;
     State state() const { return m_state; }
@@ -166,6 +144,7 @@ public:
     ExceptionOr<Ref<ConstantSourceNode>> createConstantSource();
     ExceptionOr<Ref<StereoPannerNode>> createStereoPanner();
     ExceptionOr<Ref<IIRFilterNode>> createIIRFilter(ScriptExecutionContext&, Vector<double>&& feedforward, Vector<double>&& feedback);
+    ExceptionOr<Ref<AudioBuffer>> createBuffer(unsigned numberOfChannels, unsigned length, float sampleRate);
 
     // Called at the start of each render quantum.
     void handlePreRenderTasks(const AudioIOPosition& outputPosition);
@@ -175,9 +154,6 @@ public:
     // Called at the end of each render quantum.
     void handlePostRenderTasks();
 
-    // Called periodically at the end of each render quantum to dereference finished source nodes.
-    void derefFinishedSourceNodes();
-
     // We schedule deletion of all marked nodes at the end of each realtime render quantum.
     void markForDeletion(AudioNode&);
     void deleteMarkedNodes();
@@ -194,7 +170,7 @@ public:
     void incrementConnectionCount()
     {
         ASSERT(isMainThread());
-        m_connectionCount++;
+        ++m_connectionCount;
     }
 
     unsigned connectionCount() const { return m_connectionCount; }
@@ -204,11 +180,10 @@ public:
     //
     
     void setAudioThread(Thread& thread) { m_audioThread = &thread; } // FIXME: check either not initialized or the same
-    Thread* audioThread() const { return m_audioThread; }
-    bool isAudioThread() const;
+    bool isAudioThread() const { return m_audioThread == &Thread::current(); }
 
     // Returns true only after the audio thread has been started and then shutdown.
-    bool isAudioThreadFinished() { return m_isAudioThreadFinished; }
+    bool isAudioThreadFinished() const { return m_isAudioThreadFinished; }
 
     // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
     void lock(bool& mustReleaseLock);
@@ -220,17 +195,15 @@ public:
     void unlock();
 
     // Returns true if this thread owns the context's lock.
-    bool isGraphOwner() const;
+    bool isGraphOwner() const { return m_graphOwnerThread == &Thread::current(); }
 
-    // Returns the maximum number of channels we can support.
-    static unsigned maxNumberOfChannels() { return MaxNumberOfChannels; }
+    // 32 is considered large enough for multi-channel audio.
+    // It is somewhat arbitrary and could be increased if necessary.
+    static constexpr unsigned maxNumberOfChannels = 32;
     
     // In AudioNode::decrementConnectionCount() a tryLock() is used for calling decrementConnectionCountWithLock(), but if it fails keep track here.
     void addDeferredDecrementConnectionCount(AudioNode*);
 
-    // In the audio thread at the start of each render cycle, we'll call handleDeferredDecrementConnectionCounts().
-    void handleDeferredDecrementConnectionCounts();
-
     // Only accessed when the graph lock is held.
     void markSummingJunctionDirty(AudioSummingJunction*);
     void markAudioNodeOutputDirty(AudioNodeOutput*);
@@ -239,14 +212,7 @@ public:
     void removeMarkedSummingJunction(AudioSummingJunction*);
 
     // EventTarget
-    EventTargetInterface eventTargetInterface() const final;
     ScriptExecutionContext* scriptExecutionContext() const final;
-    void refEventTarget() override { ref(); }
-    void derefEventTarget() override { deref(); }
-
-    void finishedRendering(bool didRendering);
-
-    static unsigned s_hardwareContextCount;
 
     void isPlayingAudioDidChange();
 
@@ -262,7 +228,7 @@ public:
     const void* nextAudioParameterLogIdentifier() { return childLogIdentifier(m_logIdentifier, ++m_nextAudioParameterIdentifier); }
 #endif
 
-    void postTask(WTF::Function<void()>&&);
+    void postTask(Function<void()>&&);
     bool isStopped() const { return m_isStopScheduled; }
     const SecurityOrigin* origin() const;
     void addConsoleMessage(MessageSource, MessageLevel, const String& message);
@@ -296,8 +262,7 @@ public:
     const HashMap<String, Vector<AudioParamDescriptor>>& parameterDescriptorMap() const { return m_parameterDescriptorMap; }
 
 protected:
-    explicit BaseAudioContext(Document&, const AudioContextOptions& = { });
-    BaseAudioContext(Document&, unsigned numberOfChannels, float sampleRate, RefPtr<AudioBuffer>&& renderTarget);
+    explicit BaseAudioContext(Document&);
     
     void clearPendingActivity();
     void setPendingActivity();
@@ -313,7 +278,6 @@ protected:
     void addReaction(State, DOMPromiseDeferred<void>&&);
     void setState(State);
 
-    virtual void didFinishOfflineRendering(ExceptionOr<Ref<AudioBuffer>>&&) { }
     void clear();
 
 private:
@@ -327,12 +291,19 @@ private:
     void refSourceNode(AudioNode&);
     void derefSourceNode(AudioNode&);
 
+    // Called periodically at the end of each render quantum to dereference finished source nodes.
+    void derefFinishedSourceNodes();
+
+    // In the audio thread at the start of each render cycle, we'll call handleDeferredDecrementConnectionCounts().
+    void handleDeferredDecrementConnectionCounts();
+
     // EventTarget
-    void dispatchEvent(Event&) final;
+    EventTargetInterface eventTargetInterface() const final;
+    void refEventTarget() override { ref(); }
+    void derefEventTarget() override { deref(); }
 
     // ActiveDOMObject API.
     void stop() override;
-    const char* activeDOMObjectName() const override;
 
     // When the context goes away, there might still be some sources which haven't finished playing.
     // Make sure to dereference them here.
@@ -366,13 +337,6 @@ private:
     // They will be scheduled for deletion (on the main thread) at the end of a render cycle (in realtime thread).
     Vector<AudioNode*> m_nodesToDelete;
 
-    bool m_isDeletionScheduled { false };
-    bool m_isStopScheduled { false };
-    bool m_isInitialized { false };
-    bool m_isAudioThreadFinished { false };
-    bool m_automaticPullNodesNeedUpdating { false };
-    bool m_isOfflineContext { false };
-
     // Only accessed when the graph lock is held.
     HashSet<AudioSummingJunction*> m_dirtySummingJunctions;
     HashSet<AudioNodeOutput*> m_dirtyAudioNodeOutputs;
@@ -385,35 +349,18 @@ private:
     Vector<AudioNode*> m_deferredBreakConnectionList;
     Vector<Vector<DOMPromiseDeferred<void>>> m_stateReactions;
 
-    RefPtr<AudioBuffer> m_renderTarget;
-    UniqueRef<AudioDestinationNode> m_destinationNode;
     Ref<AudioListener> m_listener;
 
-    unsigned m_connectionCount { 0 };
-
-    // Graph locking.
-    Lock m_contextGraphMutex;
-    // FIXME: Using volatile seems incorrect.
-    // https://bugs.webkit.org/show_bug.cgi?id=180332
-    Thread* volatile m_audioThread { nullptr };
-    Thread* volatile m_graphOwnerThread { nullptr }; // if the lock is held then this is the thread which owns it, otherwise == nullptr.
+    std::atomic<Thread*> m_audioThread;
+    std::atomic<Thread*> m_graphOwnerThread; // if the lock is held then this is the thread which owns it, otherwise == nullptr.
 
     std::unique_ptr<AsyncAudioDecoder> m_audioDecoder;
 
-    // This is considering 32 is large enough for multiple channels audio. 
-    // It is somewhat arbitrary and could be increased if necessary.
-    enum { MaxNumberOfChannels = 32 };
-
-    // Number of AudioBufferSourceNodes that are active (playing).
-    std::atomic<int> m_activeSourceCount { 0 };
-
-    State m_state { State::Suspended };
     RefPtr<PendingActivity<BaseAudioContext>> m_pendingActivity;
 
     AudioIOPosition m_outputPosition;
 
     HashMap<String, Vector<AudioParamDescriptor>> m_parameterDescriptorMap;
-    bool m_hasFinishedAudioSourceNodes { false };
 
     // These are cached per audio context for performance reasons. They cannot be
     // static because they rely on the sample rate.
@@ -421,6 +368,19 @@ private:
     RefPtr<PeriodicWave> m_cachedPeriodicWaveSquare;
     RefPtr<PeriodicWave> m_cachedPeriodicWaveSawtooth;
     RefPtr<PeriodicWave> m_cachedPeriodicWaveTriangle;
+
+    // Number of AudioBufferSourceNodes that are active (playing).
+    std::atomic<int> m_activeSourceCount;
+
+    unsigned m_connectionCount { 0 };
+    State m_state { State::Suspended };
+    Lock m_contextGraphLock;
+    bool m_isDeletionScheduled { false };
+    bool m_isStopScheduled { false };
+    bool m_isInitialized { false };
+    bool m_isAudioThreadFinished { false };
+    bool m_automaticPullNodesNeedUpdating { false };
+    bool m_hasFinishedAudioSourceNodes { false };
 };
 
 } // WebCore
index 8beb2ac..080ab5a 100644 (file)
@@ -43,7 +43,7 @@ WTF_MAKE_ISO_ALLOCATED_IMPL(ChannelMergerNode);
 
 ExceptionOr<Ref<ChannelMergerNode>> ChannelMergerNode::create(BaseAudioContext& context, const ChannelMergerOptions& options)
 {
-    if (options.numberOfInputs > AudioContext::maxNumberOfChannels() || !options.numberOfInputs)
+    if (options.numberOfInputs > AudioContext::maxNumberOfChannels || !options.numberOfInputs)
         return Exception { IndexSizeError, "Number of inputs is not in the allowed range."_s };
     
     auto merger = adoptRef(*new ChannelMergerNode(context, options.numberOfInputs));
index 8dbde2f..85de064 100644 (file)
@@ -39,7 +39,7 @@ WTF_MAKE_ISO_ALLOCATED_IMPL(ChannelSplitterNode);
 
 ExceptionOr<Ref<ChannelSplitterNode>> ChannelSplitterNode::create(BaseAudioContext& context, const ChannelSplitterOptions& options)
 {
-    if (options.numberOfOutputs > AudioContext::maxNumberOfChannels() || !options.numberOfOutputs)
+    if (options.numberOfOutputs > AudioContext::maxNumberOfChannels || !options.numberOfOutputs)
         return Exception { IndexSizeError, "Number of outputs is not in the allowed range"_s };
     
     auto splitter = adoptRef(*new ChannelSplitterNode(context, options.numberOfOutputs));
index 5efbfd0..09fd0c0 100644 (file)
@@ -46,7 +46,7 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(DefaultAudioDestinationNode);
 
-DefaultAudioDestinationNode::DefaultAudioDestinationNode(BaseAudioContext& context, Optional<float> sampleRate)
+DefaultAudioDestinationNode::DefaultAudioDestinationNode(AudioContext& context, Optional<float> sampleRate)
     : AudioDestinationNode(context, sampleRate.valueOr(AudioDestination::hardwareSampleRate()))
 {
     ASSERT(BaseAudioContext::isSupportedSampleRate(AudioDestination::hardwareSampleRate()));
@@ -58,6 +58,16 @@ DefaultAudioDestinationNode::~DefaultAudioDestinationNode()
     uninitialize();
 }
 
+AudioContext& DefaultAudioDestinationNode::context()
+{
+    return downcast<AudioContext>(AudioDestinationNode::context());
+}
+
+const AudioContext& DefaultAudioDestinationNode::context() const
+{
+    return downcast<AudioContext>(AudioDestinationNode::context());
+}
+
 void DefaultAudioDestinationNode::initialize()
 {
     ASSERT(isMainThread()); 
index 5a87157..94cbaf9 100644 (file)
 
 namespace WebCore {
 
+class AudioContext;
 class AudioDestination;
     
 class DefaultAudioDestinationNode final : public AudioDestinationNode {
     WTF_MAKE_ISO_ALLOCATED(DefaultAudioDestinationNode);
 public:
-    explicit DefaultAudioDestinationNode(BaseAudioContext&, Optional<float> = WTF::nullopt);
+    explicit DefaultAudioDestinationNode(AudioContext&, Optional<float> = WTF::nullopt);
+
+    AudioContext& context();
+    const AudioContext& context() const;
     
     virtual ~DefaultAudioDestinationNode();
 
index 3ebaf12..fe40d65 100644 (file)
@@ -86,7 +86,7 @@ void MediaElementAudioSourceNode::setFormat(size_t numberOfChannels, float sourc
     m_muted = wouldTaintOrigin();
 
     if (numberOfChannels != m_sourceNumberOfChannels || sourceSampleRate != m_sourceSampleRate) {
-        if (!numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels() || sourceSampleRate < minSampleRate || sourceSampleRate > maxSampleRate) {
+        if (!numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels || sourceSampleRate < minSampleRate || sourceSampleRate > maxSampleRate) {
             // process() will generate silence for these uninitialized values.
             LOG(Media, "MediaElementAudioSourceNode::setFormat(%u, %f) - unhandled format change", static_cast<unsigned>(numberOfChannels), sourceSampleRate);
             m_sourceNumberOfChannels = 0;
index 0afcc1b..b53e3cf 100644 (file)
@@ -94,7 +94,7 @@ void MediaStreamAudioSourceNode::setFormat(size_t numberOfChannels, float source
         return;
 
     // The sample-rate must be equal to the context's sample-rate.
-    if (!numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels()) {
+    if (!numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels) {
         // process() will generate silence for these uninitialized values.
         LOG(Media, "MediaStreamAudioSourceNode::setFormat(%u, %f) - unhandled format change", static_cast<unsigned>(numberOfChannels), sourceSampleRate);
         m_sourceNumberOfChannels = 0;
index 84d16a1..a839c83 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2012, Google Inc. All rights reserved.
+ * Copyright (C) 2020-2021, Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -32,6 +33,7 @@
 #include "AudioUtilities.h"
 #include "Document.h"
 #include "JSAudioBuffer.h"
+#include "OfflineAudioContextOptions.h"
 #include <wtf/IsoMallocInlines.h>
 #include <wtf/Scope.h>
 
@@ -39,53 +41,60 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(OfflineAudioContext);
 
-inline OfflineAudioContext::OfflineAudioContext(Document& document, unsigned numberOfChannels, unsigned length, float sampleRate, RefPtr<AudioBuffer>&& renderTarget)
-    : BaseAudioContext(document, numberOfChannels, sampleRate, WTFMove(renderTarget))
-    , m_length(length)
+OfflineAudioContext::OfflineAudioContext(Document& document, const OfflineAudioContextOptions& options)
+    : BaseAudioContext(document)
+    , m_destinationNode(makeUniqueRef<OfflineAudioDestinationNode>(*this, options.numberOfChannels, options.sampleRate, AudioBuffer::create(options.numberOfChannels, options.length, options.sampleRate)))
+    , m_length(options.length)
 {
+    if (!renderTarget())
+        document.addConsoleMessage(MessageSource::JS, MessageLevel::Warning, makeString("Failed to construct internal AudioBuffer with ", options.numberOfChannels, " channel(s), a sample rate of ", options.sampleRate, " and a length of ", options.length, "."));
 }
 
-ExceptionOr<Ref<OfflineAudioContext>> OfflineAudioContext::create(ScriptExecutionContext& context, unsigned numberOfChannels, unsigned length, float sampleRate)
+ExceptionOr<Ref<OfflineAudioContext>> OfflineAudioContext::create(ScriptExecutionContext& context, const OfflineAudioContextOptions& options)
 {
     if (!is<Document>(context))
         return Exception { NotSupportedError, "OfflineAudioContext is only supported in Document contexts"_s };
-    if (!numberOfChannels || numberOfChannels > maxNumberOfChannels())
+    if (!options.numberOfChannels || options.numberOfChannels > maxNumberOfChannels)
         return Exception { SyntaxError, "Number of channels is not in range"_s };
-    if (!length)
+    if (!options.length)
         return Exception { SyntaxError, "length cannot be 0"_s };
-    if (!isSupportedSampleRate(sampleRate))
+    if (!isSupportedSampleRate(options.sampleRate))
         return Exception { SyntaxError, "sampleRate is not in range"_s };
 
-    auto renderTarget = AudioBuffer::create(numberOfChannels, length, sampleRate);
-    if (!renderTarget)
-        context.addConsoleMessage(MessageSource::JS, MessageLevel::Warning, makeString("Failed to construct internal AudioBuffer with ", numberOfChannels, " channel(s), a sample rate of ", sampleRate, " and a length of ", length, "."));
-
-    auto audioContext = adoptRef(*new OfflineAudioContext(downcast<Document>(context), numberOfChannels, length, sampleRate, WTFMove(renderTarget)));
+    auto audioContext = adoptRef(*new OfflineAudioContext(downcast<Document>(context), options));
     audioContext->suspendIfNeeded();
     return audioContext;
 }
 
-ExceptionOr<Ref<OfflineAudioContext>> OfflineAudioContext::create(ScriptExecutionContext& context, const OfflineAudioContextOptions& contextOptions)
+ExceptionOr<Ref<OfflineAudioContext>> OfflineAudioContext::create(ScriptExecutionContext& context, unsigned numberOfChannels, unsigned length, float sampleRate)
 {
-    return create(context, contextOptions.numberOfChannels, contextOptions.length, contextOptions.sampleRate);
+    return create(context, { numberOfChannels, length, sampleRate });
 }
 
 void OfflineAudioContext::uninitialize()
 {
+    if (!isInitialized())
+        return;
+
     BaseAudioContext::uninitialize();
 
-    if (auto promise = std::exchange(m_pendingOfflineRenderingPromise, nullptr))
+    if (auto promise = std::exchange(m_pendingRenderingPromise, nullptr))
         promise->reject(Exception { InvalidStateError, "Context is going away"_s });
 }
 
-void OfflineAudioContext::startOfflineRendering(Ref<DeferredPromise>&& promise)
+const char* OfflineAudioContext::activeDOMObjectName() const
+{
+    return "OfflineAudioContext";
+}
+
+void OfflineAudioContext::startRendering(Ref<DeferredPromise>&& promise)
 {
     if (isStopped()) {
         promise->reject(Exception { InvalidStateError, "Context is stopped"_s });
         return;
     }
 
-    if (m_didStartOfflineRendering) {
+    if (m_didStartRendering) {
         promise->reject(Exception { InvalidStateError, "Rendering was already started"_s });
         return;
     }
@@ -104,13 +113,13 @@ void OfflineAudioContext::startOfflineRendering(Ref<DeferredPromise>&& promise)
         }
 
         setPendingActivity();
-        m_pendingOfflineRenderingPromise = WTFMove(promise);
-        m_didStartOfflineRendering = true;
+        m_pendingRenderingPromise = WTFMove(promise);
+        m_didStartRendering = true;
         setState(State::Running);
     });
 }
 
-void OfflineAudioContext::suspendOfflineRendering(double suspendTime, Ref<DeferredPromise>&& promise)
+void OfflineAudioContext::suspendRendering(double suspendTime, Ref<DeferredPromise>&& promise)
 {
     if (isStopped()) {
         promise->reject(Exception { InvalidStateError, "Context is stopped"_s });
@@ -143,9 +152,9 @@ void OfflineAudioContext::suspendOfflineRendering(double suspendTime, Ref<Deferr
     }
 }
 
-void OfflineAudioContext::resumeOfflineRendering(Ref<DeferredPromise>&& promise)
+void OfflineAudioContext::resumeRendering(Ref<DeferredPromise>&& promise)
 {
-    if (!m_didStartOfflineRendering) {
+    if (!m_didStartRendering) {
         promise->reject(Exception { InvalidStateError, "Cannot resume an offline audio context that has not started"_s });
         return;
     }
@@ -182,7 +191,7 @@ bool OfflineAudioContext::shouldSuspend()
 
 void OfflineAudioContext::didSuspendRendering(size_t frame)
 {
-    BaseAudioContext::didSuspendRendering(frame);
+    setState(State::Suspended);
 
     clearPendingActivity();
 
@@ -196,17 +205,38 @@ void OfflineAudioContext::didSuspendRendering(size_t frame)
         promise->resolve();
 }
 
-void OfflineAudioContext::didFinishOfflineRendering(ExceptionOr<Ref<AudioBuffer>>&& result)
+void OfflineAudioContext::finishedRendering(bool didRendering)
 {
-    auto finishedRenderingScope = WTF::makeScopeExit([this] {
+    ASSERT(isMainThread());
+    ALWAYS_LOG(LOGIDENTIFIER);
+
+    auto uninitializeOnExit = WTF::makeScopeExit([this] {
         uninitialize();
         clear();
     });
 
-    if (!m_pendingOfflineRenderingPromise)
+    setState(State::Closed);
+
+    // Avoid firing the event if the document has already gone away.
+    if (isStopped())
+        return;
+
+    RefPtr<AudioBuffer> renderedBuffer = renderTarget();
+    ASSERT(renderedBuffer);
+
+    if (didRendering) {
+        queueTaskToDispatchEvent(*this, TaskSource::MediaElement, OfflineAudioCompletionEvent::create(*renderedBuffer));
+        settleRenderingPromise(renderedBuffer.releaseNonNull());
+    } else
+        settleRenderingPromise(Exception { InvalidStateError, "Offline rendering failed"_s });
+}
+
+void OfflineAudioContext::settleRenderingPromise(ExceptionOr<Ref<AudioBuffer>>&& result)
+{
+    auto promise = std::exchange(m_pendingRenderingPromise, nullptr);
+    if (!promise)
         return;
 
-    auto promise = std::exchange(m_pendingOfflineRenderingPromise, nullptr);
     if (result.hasException()) {
         promise->reject(result.releaseException());
         return;
@@ -220,6 +250,13 @@ void OfflineAudioContext::offlineLock(bool& mustReleaseLock)
     lockInternal(mustReleaseLock);
 }
 
+void OfflineAudioContext::dispatchEvent(Event& event)
+{
+    BaseAudioContext::dispatchEvent(event);
+    if (event.eventInterface() == OfflineAudioCompletionEventInterfaceType)
+        clearPendingActivity();
+}
+
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
index 3df07cb..d418652 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2012, Google Inc. All rights reserved.
+ * Copyright (C) 2020-2021, Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 
 #include "BaseAudioContext.h"
 #include "JSDOMPromiseDeferred.h"
-#include "OfflineAudioContextOptions.h"
 #include "OfflineAudioDestinationNode.h"
 #include <wtf/HashMap.h>
 #include <wtf/Lock.h>
 
 namespace WebCore {
 
+struct OfflineAudioContextOptions;
+
 class OfflineAudioContext final : public BaseAudioContext {
     WTF_MAKE_ISO_ALLOCATED(OfflineAudioContext);
 public:
-    static ExceptionOr<Ref<OfflineAudioContext>> create(ScriptExecutionContext&, unsigned numberOfChannels, unsigned length, float sampleRate);
-    
     static ExceptionOr<Ref<OfflineAudioContext>> create(ScriptExecutionContext&, const OfflineAudioContextOptions&);
-
-    void startOfflineRendering(Ref<DeferredPromise>&&);
-    void suspendOfflineRendering(double suspendTime, Ref<DeferredPromise>&&);
-    void resumeOfflineRendering(Ref<DeferredPromise>&&);
+    static ExceptionOr<Ref<OfflineAudioContext>> create(ScriptExecutionContext&, unsigned numberOfChannels, unsigned length, float sampleRate);
+    void startRendering(Ref<DeferredPromise>&&);
+    void suspendRendering(double suspendTime, Ref<DeferredPromise>&&);
+    void resumeRendering(Ref<DeferredPromise>&&);
+    void finishedRendering(bool didRendering);
+    void didSuspendRendering(size_t frame);
 
     unsigned length() const { return m_length; }
-    bool shouldSuspend() final;
+    bool shouldSuspend();
+
+    OfflineAudioDestinationNode& destination() final { return m_destinationNode.get(); }
+    const OfflineAudioDestinationNode& destination() const final { return m_destinationNode.get(); }
 
-    OfflineAudioDestinationNode& destination() { return static_cast<OfflineAudioDestinationNode&>(BaseAudioContext::destination()); }
-    const OfflineAudioDestinationNode& destination() const { return static_cast<const OfflineAudioDestinationNode&>(BaseAudioContext::destination()); }
+private:
+    OfflineAudioContext(Document&, const OfflineAudioContextOptions&);
+
+    AudioBuffer* renderTarget() const { return destination().renderTarget(); }
 
     // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
     void offlineLock(bool& mustReleaseLock);
@@ -72,17 +79,25 @@ public:
         bool m_mustReleaseLock;
     };
 
-private:
-    OfflineAudioContext(Document&, unsigned numberOfChannels, unsigned length, float sampleRate, RefPtr<AudioBuffer>&& renderTarget);
+    // ActiveDOMObject
+    const char* activeDOMObjectName() const final;
 
-    void didFinishOfflineRendering(ExceptionOr<Ref<AudioBuffer>>&&) final;
-    void didSuspendRendering(size_t frame) final;
+    // EventTarget
+    void dispatchEvent(Event&) final;
+
+    void settleRenderingPromise(ExceptionOr<Ref<AudioBuffer>>&&);
     void uninitialize() final;
+    bool isOfflineContext() const final { return true; }
 
-    RefPtr<DeferredPromise> m_pendingOfflineRenderingPromise;
+    UniqueRef<OfflineAudioDestinationNode> m_destinationNode;
+    RefPtr<DeferredPromise> m_pendingRenderingPromise;
     HashMap<unsigned /* frame */, RefPtr<DeferredPromise>, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_suspendRequests;
     unsigned m_length;
-    bool m_didStartOfflineRendering { false };
+    bool m_didStartRendering { false };
 };
 
 } // namespace WebCore
+
+SPECIALIZE_TYPE_TRAITS_BEGIN(WebCore::OfflineAudioContext)
+    static bool isType(const WebCore::BaseAudioContext& context) { return context.isOfflineContext(); }
+SPECIALIZE_TYPE_TRAITS_END()
index 2e22db5..b6b2e4e 100644 (file)
     [CallWith=ScriptExecutionContext] constructor(OfflineAudioContextOptions contextOptions);
     [CallWith=ScriptExecutionContext] constructor(unsigned long numberOfChannels, unsigned long length, float sampleRate);
     
-    [ImplementedAs=startOfflineRendering] Promise<AudioBuffer> startRendering();
+    Promise<AudioBuffer> startRendering();
 
-    [ImplementedAs=resumeOfflineRendering] Promise<undefined> resume();
-    [ImplementedAs=suspendOfflineRendering] Promise<undefined> suspend(double suspendTime);
+    [ImplementedAs=resumeRendering] Promise<undefined> resume();
+    [ImplementedAs=suspendRendering] Promise<undefined> suspend(double suspendTime);
 
     readonly attribute unsigned long length;
 
index 4d8d5a8..5ef86af 100644 (file)
@@ -34,6 +34,7 @@
 #include "AudioWorklet.h"
 #include "AudioWorkletMessagingProxy.h"
 #include "HRTFDatabaseLoader.h"
+#include "OfflineAudioContext.h"
 #include "WorkerRunLoop.h"
 #include <algorithm>
 #include <wtf/IsoMallocInlines.h>
@@ -44,7 +45,7 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(OfflineAudioDestinationNode);
 
-OfflineAudioDestinationNode::OfflineAudioDestinationNode(BaseAudioContext& context, unsigned numberOfChannels, float sampleRate, RefPtr<AudioBuffer>&& renderTarget)
+OfflineAudioDestinationNode::OfflineAudioDestinationNode(OfflineAudioContext& context, unsigned numberOfChannels, float sampleRate, RefPtr<AudioBuffer>&& renderTarget)
     : AudioDestinationNode(context, sampleRate)
     , m_numberOfChannels(numberOfChannels)
     , m_renderTarget(WTFMove(renderTarget))
@@ -59,6 +60,16 @@ OfflineAudioDestinationNode::~OfflineAudioDestinationNode()
     uninitialize();
 }
 
+OfflineAudioContext& OfflineAudioDestinationNode::context()
+{
+    return downcast<OfflineAudioContext>(AudioDestinationNode::context());
+}
+
+const OfflineAudioContext& OfflineAudioDestinationNode::context() const
+{
+    return downcast<OfflineAudioContext>(AudioDestinationNode::context());
+}
+
 unsigned OfflineAudioDestinationNode::maxChannelCount() const
 {
     return m_numberOfChannels;
index 3b042cd..bbcabaa 100644 (file)
@@ -33,13 +33,19 @@ namespace WebCore {
 
 class AudioBus;
 class AudioContext;
+class OfflineAudioContext;
     
 class OfflineAudioDestinationNode final : public AudioDestinationNode {
     WTF_MAKE_ISO_ALLOCATED(OfflineAudioDestinationNode);
 public:
-    OfflineAudioDestinationNode(BaseAudioContext&, unsigned numberOfChannels, float sampleRate, RefPtr<AudioBuffer>&& renderTarget);
+    OfflineAudioDestinationNode(OfflineAudioContext&, unsigned numberOfChannels, float sampleRate, RefPtr<AudioBuffer>&& renderTarget);
 
     virtual ~OfflineAudioDestinationNode();
+
+    OfflineAudioContext& context();
+    const OfflineAudioContext& context() const;
+
+    AudioBuffer* renderTarget() const { return m_renderTarget.get(); }
     
     // AudioNode   
     void initialize() override;
index f3e45e7..2561ffa 100644 (file)
@@ -62,7 +62,7 @@ ScriptProcessorNode::ScriptProcessorNode(BaseAudioContext& context, size_t buffe
     if (m_bufferSize < AudioUtilities::renderQuantumSize)
         m_bufferSize = AudioUtilities::renderQuantumSize;
 
-    ASSERT(numberOfInputChannels <= AudioContext::maxNumberOfChannels());
+    ASSERT(numberOfInputChannels <= AudioContext::maxNumberOfChannels);
 
     initializeDefaultNodeOptions(numberOfInputChannels, ChannelCountMode::Explicit, ChannelInterpretation::Speakers);
     addInput();