Drop AudioContextBase class
author: cdumez@apple.com <cdumez@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 23 Jun 2020 18:59:47 +0000 (18:59 +0000)
committer: cdumez@apple.com <cdumez@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 23 Jun 2020 18:59:47 +0000 (18:59 +0000)
https://bugs.webkit.org/show_bug.cgi?id=213522

Reviewed by Geoffrey Garen.

Drop AudioContextBase class and have WebKitAudioContext subclass the new BaseAudioContext class instead.
We recently introduced BaseAudioContext to match the specification and keeping AudioContextBase is now confusing.

No new tests; this simplifies our code without any Web-facing behavior change.
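
In sketch form, the hierarchy change is the following. This is a simplified, compilable illustration, not the real class definitions: the stub base classes and the exact form of the WebKitAudioContext override are assumptions (the override is implied by the (isType) change in WebKitAudioContext.h):

    // Before this patch, an abstract AudioContextBase sat above both context
    // classes and forwarded everything through pure-virtual methods:
    //
    //   AudioContextBase
    //    |- BaseAudioContext
    //    `- WebKitAudioContext
    //
    // After this patch, BaseAudioContext absorbs that interface as concrete
    // members (locking, graph bookkeeping, behavior restrictions, AutoLocker)
    // and WebKitAudioContext derives from it directly.

    // Stubs standing in for the real WTF/WebCore types so the sketch compiles.
    class ActiveDOMObject { };
    template<typename T> class ThreadSafeRefCounted { };

    class BaseAudioContext : public ActiveDOMObject, public ThreadSafeRefCounted<BaseAudioContext> {
    public:
        virtual ~BaseAudioContext() = default;
        virtual bool isWebKitAudioContext() const { return false; }
    };

    class WebKitAudioContext : public BaseAudioContext {
    public:
        // Assumed override: this is what the isType() specialization in
        // WebKitAudioContext.h keys downcast<WebKitAudioContext>() off of.
        bool isWebKitAudioContext() const final { return true; }
    };

Everything that used to take an AudioContextBase& or spell AudioContextBase::AutoLocker now names BaseAudioContext directly, which is why the bulk of the diff below is a mechanical type rename.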

* Modules/webaudio/AnalyserNode.cpp:
(WebCore::AnalyserNode::AnalyserNode):
* Modules/webaudio/AnalyserNode.h:
* Modules/webaudio/AudioBasicInspectorNode.cpp:
(WebCore::AudioBasicInspectorNode::AudioBasicInspectorNode):
* Modules/webaudio/AudioBasicInspectorNode.h:
* Modules/webaudio/AudioBasicProcessorNode.cpp:
(WebCore::AudioBasicProcessorNode::AudioBasicProcessorNode):
* Modules/webaudio/AudioBasicProcessorNode.h:
* Modules/webaudio/AudioBufferSourceNode.cpp:
(WebCore::AudioBufferSourceNode::create):
(WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
(WebCore::AudioBufferSourceNode::setBuffer):
* Modules/webaudio/AudioBufferSourceNode.h:
* Modules/webaudio/AudioContext.cpp:
* Modules/webaudio/AudioDestinationNode.cpp:
(WebCore::AudioDestinationNode::AudioDestinationNode):
* Modules/webaudio/AudioDestinationNode.h:
* Modules/webaudio/AudioNode.cpp:
(WebCore::AudioNode::AudioNode):
(WebCore::AudioNode::connect):
(WebCore::AudioNode::disconnect):
(WebCore::AudioNode::setChannelCount):
(WebCore::AudioNode::setChannelCountMode):
(WebCore::AudioNode::setChannelInterpretation):
(WebCore::AudioNode::enableOutputsIfNecessary):
* Modules/webaudio/AudioNode.h:
(WebCore::AudioNode::context):
(WebCore::AudioNode::context const):
* Modules/webaudio/AudioNode.idl:
* Modules/webaudio/AudioNodeOutput.h:
(WebCore::AudioNodeOutput::context):
* Modules/webaudio/AudioParam.cpp:
(WebCore::AudioParam::AudioParam):
* Modules/webaudio/AudioParam.h:
* Modules/webaudio/AudioParamTimeline.cpp:
(WebCore::AudioParamTimeline::valueForContextTime):
* Modules/webaudio/AudioParamTimeline.h:
* Modules/webaudio/AudioScheduledSourceNode.cpp:
(WebCore::AudioScheduledSourceNode::AudioScheduledSourceNode):
* Modules/webaudio/AudioScheduledSourceNode.h:
* Modules/webaudio/AudioSummingJunction.cpp:
(WebCore::AudioSummingJunction::AudioSummingJunction):
* Modules/webaudio/AudioSummingJunction.h:
(WebCore::AudioSummingJunction::context):
* Modules/webaudio/BaseAudioContext.cpp:
(WebCore::BaseAudioContext::BaseAudioContext):
(WebCore::BaseAudioContext::document const):
(WebCore::BaseAudioContext::scriptExecutionContext const):
* Modules/webaudio/BaseAudioContext.h:
(WebCore::BaseAudioContext::isOfflineContext const):
(WebCore::BaseAudioContext::isWebKitAudioContext const):
(WebCore::BaseAudioContext::currentSampleFrame const):
(WebCore::BaseAudioContext::currentTime const):
(WebCore::BaseAudioContext::sampleRate const):
(WebCore::BaseAudioContext::incrementConnectionCount):
(WebCore::BaseAudioContext::setAudioThread):
(WebCore::BaseAudioContext::isAudioThreadFinished):
(WebCore::BaseAudioContext::behaviorRestrictions const):
(WebCore::BaseAudioContext::addBehaviorRestriction):
(WebCore::BaseAudioContext::removeBehaviorRestriction):
(WebCore::BaseAudioContext::nextAudioNodeLogIdentifier):
(WebCore::BaseAudioContext::nextAudioParameterLogIdentifier):
(WebCore::BaseAudioContext::isStopped const):
(WebCore::BaseAudioContext::AutoLocker::AutoLocker):
(WebCore::BaseAudioContext::AutoLocker::~AutoLocker):
* Modules/webaudio/BiquadFilterNode.cpp:
(WebCore::BiquadFilterNode::BiquadFilterNode):
* Modules/webaudio/BiquadFilterNode.h:
* Modules/webaudio/BiquadProcessor.cpp:
(WebCore::BiquadProcessor::BiquadProcessor):
* Modules/webaudio/BiquadProcessor.h:
* Modules/webaudio/ChannelMergerNode.cpp:
(WebCore::ChannelMergerNode::create):
(WebCore::ChannelMergerNode::ChannelMergerNode):
* Modules/webaudio/ChannelMergerNode.h:
* Modules/webaudio/ChannelSplitterNode.cpp:
(WebCore::ChannelSplitterNode::create):
(WebCore::ChannelSplitterNode::ChannelSplitterNode):
* Modules/webaudio/ChannelSplitterNode.h:
* Modules/webaudio/ConvolverNode.cpp:
(WebCore::ConvolverNode::ConvolverNode):
* Modules/webaudio/ConvolverNode.h:
* Modules/webaudio/DefaultAudioDestinationNode.cpp:
(WebCore::DefaultAudioDestinationNode::DefaultAudioDestinationNode):
* Modules/webaudio/DefaultAudioDestinationNode.h:
* Modules/webaudio/DelayNode.cpp:
(WebCore::DelayNode::DelayNode):
(WebCore::DelayNode::create):
* Modules/webaudio/DelayNode.h:
* Modules/webaudio/DelayProcessor.cpp:
(WebCore::DelayProcessor::DelayProcessor):
* Modules/webaudio/DelayProcessor.h:
* Modules/webaudio/DynamicsCompressorNode.cpp:
(WebCore::DynamicsCompressorNode::DynamicsCompressorNode):
* Modules/webaudio/DynamicsCompressorNode.h:
* Modules/webaudio/GainNode.cpp:
(WebCore::GainNode::GainNode):
* Modules/webaudio/GainNode.h:
* Modules/webaudio/MediaElementAudioSourceNode.cpp:
(WebCore::MediaElementAudioSourceNode::create):
(WebCore::MediaElementAudioSourceNode::MediaElementAudioSourceNode):
(WebCore::MediaElementAudioSourceNode::setFormat):
* Modules/webaudio/MediaElementAudioSourceNode.h:
* Modules/webaudio/MediaStreamAudioDestinationNode.cpp:
(WebCore::MediaStreamAudioDestinationNode::create):
(WebCore::MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode):
* Modules/webaudio/MediaStreamAudioDestinationNode.h:
* Modules/webaudio/MediaStreamAudioSourceNode.cpp:
(WebCore::MediaStreamAudioSourceNode::create):
(WebCore::MediaStreamAudioSourceNode::MediaStreamAudioSourceNode):
* Modules/webaudio/MediaStreamAudioSourceNode.h:
* Modules/webaudio/OfflineAudioDestinationNode.cpp:
(WebCore::OfflineAudioDestinationNode::OfflineAudioDestinationNode):
* Modules/webaudio/OfflineAudioDestinationNode.h:
* Modules/webaudio/OscillatorNode.cpp:
(WebCore::OscillatorNode::create):
(WebCore::OscillatorNode::OscillatorNode):
* Modules/webaudio/OscillatorNode.h:
* Modules/webaudio/PannerNode.cpp:
(WebCore::PannerNodeBase::PannerNodeBase):
* Modules/webaudio/PannerNode.h:
* Modules/webaudio/ScriptProcessorNode.cpp:
(WebCore::ScriptProcessorNode::create):
(WebCore::ScriptProcessorNode::ScriptProcessorNode):
* Modules/webaudio/ScriptProcessorNode.h:
* Modules/webaudio/WaveShaperNode.cpp:
(WebCore::WaveShaperNode::WaveShaperNode):
* Modules/webaudio/WaveShaperNode.h:
* Modules/webaudio/WebKitAudioContext.cpp:
(WebCore::WebKitAudioContext::WebKitAudioContext):
(WebCore::WebKitAudioContext::createMediaElementSource):
(WebCore::WebKitAudioContext::createMediaStreamSource):
(WebCore::WebKitAudioContext::createMediaStreamDestination):
(WebCore::WebKitAudioContext::createWebKitPanner):
(WebCore::WebKitAudioContext::close):
* Modules/webaudio/WebKitAudioContext.h:
(isType):
* Modules/webaudio/WebKitAudioContext.idl:
* dom/EventTargetFactory.in:
* testing/Internals.cpp:
(WebCore::Internals::setAudioContextRestrictions):
* testing/Internals.h:
* testing/Internals.idl:

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@263410 268f45cc-cd09-0410-ab3c-d52691b4dbfc

66 files changed:
Source/WebCore/ChangeLog
Source/WebCore/Modules/webaudio/AnalyserNode.cpp
Source/WebCore/Modules/webaudio/AnalyserNode.h
Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp
Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.h
Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp
Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h
Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
Source/WebCore/Modules/webaudio/AudioBufferSourceNode.h
Source/WebCore/Modules/webaudio/AudioContext.cpp
Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp
Source/WebCore/Modules/webaudio/AudioDestinationNode.h
Source/WebCore/Modules/webaudio/AudioNode.cpp
Source/WebCore/Modules/webaudio/AudioNode.h
Source/WebCore/Modules/webaudio/AudioNodeOutput.h
Source/WebCore/Modules/webaudio/AudioParam.cpp
Source/WebCore/Modules/webaudio/AudioParam.h
Source/WebCore/Modules/webaudio/AudioParamTimeline.cpp
Source/WebCore/Modules/webaudio/AudioParamTimeline.h
Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp
Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.h
Source/WebCore/Modules/webaudio/AudioSummingJunction.cpp
Source/WebCore/Modules/webaudio/AudioSummingJunction.h
Source/WebCore/Modules/webaudio/BaseAudioContext.cpp
Source/WebCore/Modules/webaudio/BaseAudioContext.h
Source/WebCore/Modules/webaudio/BiquadFilterNode.cpp
Source/WebCore/Modules/webaudio/BiquadFilterNode.h
Source/WebCore/Modules/webaudio/BiquadProcessor.cpp
Source/WebCore/Modules/webaudio/BiquadProcessor.h
Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp
Source/WebCore/Modules/webaudio/ChannelMergerNode.h
Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp
Source/WebCore/Modules/webaudio/ChannelSplitterNode.h
Source/WebCore/Modules/webaudio/ConvolverNode.cpp
Source/WebCore/Modules/webaudio/ConvolverNode.h
Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp
Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h
Source/WebCore/Modules/webaudio/DelayNode.cpp
Source/WebCore/Modules/webaudio/DelayNode.h
Source/WebCore/Modules/webaudio/DelayProcessor.cpp
Source/WebCore/Modules/webaudio/DelayProcessor.h
Source/WebCore/Modules/webaudio/DynamicsCompressorNode.cpp
Source/WebCore/Modules/webaudio/DynamicsCompressorNode.h
Source/WebCore/Modules/webaudio/GainNode.cpp
Source/WebCore/Modules/webaudio/GainNode.h
Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp
Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.h
Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.cpp
Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.h
Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp
Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.h
Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.cpp
Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h
Source/WebCore/Modules/webaudio/OscillatorNode.cpp
Source/WebCore/Modules/webaudio/OscillatorNode.h
Source/WebCore/Modules/webaudio/PannerNode.cpp
Source/WebCore/Modules/webaudio/PannerNode.h
Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
Source/WebCore/Modules/webaudio/ScriptProcessorNode.h
Source/WebCore/Modules/webaudio/WaveShaperNode.cpp
Source/WebCore/Modules/webaudio/WaveShaperNode.h
Source/WebCore/Modules/webaudio/WebKitAudioContext.cpp
Source/WebCore/Modules/webaudio/WebKitAudioContext.h
Source/WebCore/Modules/webaudio/WebKitAudioContext.idl
Source/WebCore/dom/EventTargetFactory.in
Source/WebCore/testing/Internals.cpp

Source/WebCore/ChangeLog
index 635a4e2..c352143 100644
@@ -1,3 +1,160 @@
+2020-06-23  Chris Dumez  <cdumez@apple.com>
+
+        Drop AudioContextBase class
+        https://bugs.webkit.org/show_bug.cgi?id=213522
+
+        Reviewed by Geoffrey Garen.
+
+        Drop AudioContextBase class and have WebKitAudioContext subclass the new BaseAudioContext class instead.
+        We recently introduced BaseAudioContext to match the specification and keeping AudioContextBase is now confusing.
+
+        No new tests; this simplifies our code without any Web-facing behavior change.
+
+        * Modules/webaudio/AnalyserNode.cpp:
+        (WebCore::AnalyserNode::AnalyserNode):
+        * Modules/webaudio/AnalyserNode.h:
+        * Modules/webaudio/AudioBasicInspectorNode.cpp:
+        (WebCore::AudioBasicInspectorNode::AudioBasicInspectorNode):
+        * Modules/webaudio/AudioBasicInspectorNode.h:
+        * Modules/webaudio/AudioBasicProcessorNode.cpp:
+        (WebCore::AudioBasicProcessorNode::AudioBasicProcessorNode):
+        * Modules/webaudio/AudioBasicProcessorNode.h:
+        * Modules/webaudio/AudioBufferSourceNode.cpp:
+        (WebCore::AudioBufferSourceNode::create):
+        (WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
+        (WebCore::AudioBufferSourceNode::setBuffer):
+        * Modules/webaudio/AudioBufferSourceNode.h:
+        * Modules/webaudio/AudioContext.cpp:
+        * Modules/webaudio/AudioDestinationNode.cpp:
+        (WebCore::AudioDestinationNode::AudioDestinationNode):
+        * Modules/webaudio/AudioDestinationNode.h:
+        * Modules/webaudio/AudioNode.cpp:
+        (WebCore::AudioNode::AudioNode):
+        (WebCore::AudioNode::connect):
+        (WebCore::AudioNode::disconnect):
+        (WebCore::AudioNode::setChannelCount):
+        (WebCore::AudioNode::setChannelCountMode):
+        (WebCore::AudioNode::setChannelInterpretation):
+        (WebCore::AudioNode::enableOutputsIfNecessary):
+        * Modules/webaudio/AudioNode.h:
+        (WebCore::AudioNode::context):
+        (WebCore::AudioNode::context const):
+        * Modules/webaudio/AudioNode.idl:
+        * Modules/webaudio/AudioNodeOutput.h:
+        (WebCore::AudioNodeOutput::context):
+        * Modules/webaudio/AudioParam.cpp:
+        (WebCore::AudioParam::AudioParam):
+        * Modules/webaudio/AudioParam.h:
+        * Modules/webaudio/AudioParamTimeline.cpp:
+        (WebCore::AudioParamTimeline::valueForContextTime):
+        * Modules/webaudio/AudioParamTimeline.h:
+        * Modules/webaudio/AudioScheduledSourceNode.cpp:
+        (WebCore::AudioScheduledSourceNode::AudioScheduledSourceNode):
+        * Modules/webaudio/AudioScheduledSourceNode.h:
+        * Modules/webaudio/AudioSummingJunction.cpp:
+        (WebCore::AudioSummingJunction::AudioSummingJunction):
+        * Modules/webaudio/AudioSummingJunction.h:
+        (WebCore::AudioSummingJunction::context):
+        * Modules/webaudio/BaseAudioContext.cpp:
+        (WebCore::BaseAudioContext::BaseAudioContext):
+        (WebCore::BaseAudioContext::document const):
+        (WebCore::BaseAudioContext::scriptExecutionContext const):
+        * Modules/webaudio/BaseAudioContext.h:
+        (WebCore::BaseAudioContext::isOfflineContext const):
+        (WebCore::BaseAudioContext::isWebKitAudioContext const):
+        (WebCore::BaseAudioContext::currentSampleFrame const):
+        (WebCore::BaseAudioContext::currentTime const):
+        (WebCore::BaseAudioContext::sampleRate const):
+        (WebCore::BaseAudioContext::incrementConnectionCount):
+        (WebCore::BaseAudioContext::setAudioThread):
+        (WebCore::BaseAudioContext::isAudioThreadFinished):
+        (WebCore::BaseAudioContext::behaviorRestrictions const):
+        (WebCore::BaseAudioContext::addBehaviorRestriction):
+        (WebCore::BaseAudioContext::removeBehaviorRestriction):
+        (WebCore::BaseAudioContext::nextAudioNodeLogIdentifier):
+        (WebCore::BaseAudioContext::nextAudioParameterLogIdentifier):
+        (WebCore::BaseAudioContext::isStopped const):
+        (WebCore::BaseAudioContext::AutoLocker::AutoLocker):
+        (WebCore::BaseAudioContext::AutoLocker::~AutoLocker):
+        * Modules/webaudio/BiquadFilterNode.cpp:
+        (WebCore::BiquadFilterNode::BiquadFilterNode):
+        * Modules/webaudio/BiquadFilterNode.h:
+        * Modules/webaudio/BiquadProcessor.cpp:
+        (WebCore::BiquadProcessor::BiquadProcessor):
+        * Modules/webaudio/BiquadProcessor.h:
+        * Modules/webaudio/ChannelMergerNode.cpp:
+        (WebCore::ChannelMergerNode::create):
+        (WebCore::ChannelMergerNode::ChannelMergerNode):
+        * Modules/webaudio/ChannelMergerNode.h:
+        * Modules/webaudio/ChannelSplitterNode.cpp:
+        (WebCore::ChannelSplitterNode::create):
+        (WebCore::ChannelSplitterNode::ChannelSplitterNode):
+        * Modules/webaudio/ChannelSplitterNode.h:
+        * Modules/webaudio/ConvolverNode.cpp:
+        (WebCore::ConvolverNode::ConvolverNode):
+        * Modules/webaudio/ConvolverNode.h:
+        * Modules/webaudio/DefaultAudioDestinationNode.cpp:
+        (WebCore::DefaultAudioDestinationNode::DefaultAudioDestinationNode):
+        * Modules/webaudio/DefaultAudioDestinationNode.h:
+        * Modules/webaudio/DelayNode.cpp:
+        (WebCore::DelayNode::DelayNode):
+        (WebCore::DelayNode::create):
+        * Modules/webaudio/DelayNode.h:
+        * Modules/webaudio/DelayProcessor.cpp:
+        (WebCore::DelayProcessor::DelayProcessor):
+        * Modules/webaudio/DelayProcessor.h:
+        * Modules/webaudio/DynamicsCompressorNode.cpp:
+        (WebCore::DynamicsCompressorNode::DynamicsCompressorNode):
+        * Modules/webaudio/DynamicsCompressorNode.h:
+        * Modules/webaudio/GainNode.cpp:
+        (WebCore::GainNode::GainNode):
+        * Modules/webaudio/GainNode.h:
+        * Modules/webaudio/MediaElementAudioSourceNode.cpp:
+        (WebCore::MediaElementAudioSourceNode::create):
+        (WebCore::MediaElementAudioSourceNode::MediaElementAudioSourceNode):
+        (WebCore::MediaElementAudioSourceNode::setFormat):
+        * Modules/webaudio/MediaElementAudioSourceNode.h:
+        * Modules/webaudio/MediaStreamAudioDestinationNode.cpp:
+        (WebCore::MediaStreamAudioDestinationNode::create):
+        (WebCore::MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode):
+        * Modules/webaudio/MediaStreamAudioDestinationNode.h:
+        * Modules/webaudio/MediaStreamAudioSourceNode.cpp:
+        (WebCore::MediaStreamAudioSourceNode::create):
+        (WebCore::MediaStreamAudioSourceNode::MediaStreamAudioSourceNode):
+        * Modules/webaudio/MediaStreamAudioSourceNode.h:
+        * Modules/webaudio/OfflineAudioDestinationNode.cpp:
+        (WebCore::OfflineAudioDestinationNode::OfflineAudioDestinationNode):
+        * Modules/webaudio/OfflineAudioDestinationNode.h:
+        * Modules/webaudio/OscillatorNode.cpp:
+        (WebCore::OscillatorNode::create):
+        (WebCore::OscillatorNode::OscillatorNode):
+        * Modules/webaudio/OscillatorNode.h:
+        * Modules/webaudio/PannerNode.cpp:
+        (WebCore::PannerNodeBase::PannerNodeBase):
+        * Modules/webaudio/PannerNode.h:
+        * Modules/webaudio/ScriptProcessorNode.cpp:
+        (WebCore::ScriptProcessorNode::create):
+        (WebCore::ScriptProcessorNode::ScriptProcessorNode):
+        * Modules/webaudio/ScriptProcessorNode.h:
+        * Modules/webaudio/WaveShaperNode.cpp:
+        (WebCore::WaveShaperNode::WaveShaperNode):
+        * Modules/webaudio/WaveShaperNode.h:
+        * Modules/webaudio/WebKitAudioContext.cpp:
+        (WebCore::WebKitAudioContext::WebKitAudioContext):
+        (WebCore::WebKitAudioContext::createMediaElementSource):
+        (WebCore::WebKitAudioContext::createMediaStreamSource):
+        (WebCore::WebKitAudioContext::createMediaStreamDestination):
+        (WebCore::WebKitAudioContext::createWebKitPanner):
+        (WebCore::WebKitAudioContext::close):
+        * Modules/webaudio/WebKitAudioContext.h:
+        (isType):
+        * Modules/webaudio/WebKitAudioContext.idl:
+        * dom/EventTargetFactory.in:
+        * testing/Internals.cpp:
+        (WebCore::Internals::setAudioContextRestrictions):
+        * testing/Internals.h:
+        * testing/Internals.idl:
+
 2020-06-22  Sergio Villar Senin  <svillar@igalia.com>
 
         [WebXR] Introducing XRLayer
Source/WebCore/Modules/webaudio/AnalyserNode.cpp
index 215de32..5ad5af8 100644
@@ -36,7 +36,7 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(AnalyserNode);
 
-AnalyserNode::AnalyserNode(AudioContextBase& context, float sampleRate)
+AnalyserNode::AnalyserNode(BaseAudioContext& context, float sampleRate)
     : AudioBasicInspectorNode(context, sampleRate, 2)
 {
     setNodeType(NodeTypeAnalyser);
Source/WebCore/Modules/webaudio/AnalyserNode.h
index e1ed1ed..25169c4 100644
@@ -32,7 +32,7 @@ namespace WebCore {
 class AnalyserNode final : public AudioBasicInspectorNode {
     WTF_MAKE_ISO_ALLOCATED(AnalyserNode);
 public:
-    static Ref<AnalyserNode> create(AudioContextBase& context, float sampleRate)
+    static Ref<AnalyserNode> create(BaseAudioContext& context, float sampleRate)
     {
         return adoptRef(*new AnalyserNode(context, sampleRate));
     }
@@ -59,7 +59,7 @@ public:
     void getByteTimeDomainData(const Ref<JSC::Uint8Array>& array) { m_analyser.getByteTimeDomainData(array.get()); }
 
 private:
-    AnalyserNode(AudioContextBase&, float sampleRate);
+    AnalyserNode(BaseAudioContext&, float sampleRate);
 
     void process(size_t framesToProcess) final;
     void reset() final;
Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp
index 1453dac..fa68ee9 100644
@@ -36,7 +36,7 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(AudioBasicInspectorNode);
 
-AudioBasicInspectorNode::AudioBasicInspectorNode(AudioContextBase& context, float sampleRate, unsigned outputChannelCount)
+AudioBasicInspectorNode::AudioBasicInspectorNode(BaseAudioContext& context, float sampleRate, unsigned outputChannelCount)
     : AudioNode(context, sampleRate)
 {
     setNodeType(NodeTypeBasicInspector);
Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.h
index e769e46..bc200a5 100644
@@ -34,7 +34,7 @@ namespace WebCore {
 class AudioBasicInspectorNode : public AudioNode {
     WTF_MAKE_ISO_ALLOCATED(AudioBasicInspectorNode);
 public:
-    AudioBasicInspectorNode(AudioContextBase&, float sampleRate, unsigned outputChannelCount);
+    AudioBasicInspectorNode(BaseAudioContext&, float sampleRate, unsigned outputChannelCount);
 
 private:
     void pullInputs(size_t framesToProcess) override;
Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp
index f7b796b..e31cbaa 100644
@@ -39,7 +39,7 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(AudioBasicProcessorNode);
 
-AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContextBase& context, float sampleRate)
+AudioBasicProcessorNode::AudioBasicProcessorNode(BaseAudioContext& context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     addInput(makeUnique<AudioNodeInput>(this));
Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h
index e16373b..1ff462e 100644
@@ -37,7 +37,7 @@ class AudioProcessor;
 class AudioBasicProcessorNode : public AudioNode {
     WTF_MAKE_ISO_ALLOCATED(AudioBasicProcessorNode);
 public:
-    AudioBasicProcessorNode(AudioContextBase&, float sampleRate);
+    AudioBasicProcessorNode(BaseAudioContext&, float sampleRate);
 
     // AudioNode
     void process(size_t framesToProcess) override;
Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
index 0c1ba4e..ef34550 100644
@@ -49,12 +49,12 @@ const double DefaultGrainDuration = 0.020; // 20ms
 // to minimize linear interpolation aliasing.
 const double MaxRate = 1024;
 
-Ref<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContextBase& context, float sampleRate)
+Ref<AudioBufferSourceNode> AudioBufferSourceNode::create(BaseAudioContext& context, float sampleRate)
 {
     return adoptRef(*new AudioBufferSourceNode(context, sampleRate));
 }
 
-AudioBufferSourceNode::AudioBufferSourceNode(AudioContextBase& context, float sampleRate)
+AudioBufferSourceNode::AudioBufferSourceNode(BaseAudioContext& context, float sampleRate)
     : AudioScheduledSourceNode(context, sampleRate)
     , m_buffer(nullptr)
     , m_isLooping(false)
@@ -413,7 +413,7 @@ void AudioBufferSourceNode::setBuffer(RefPtr<AudioBuffer>&& buffer)
     DEBUG_LOG(LOGIDENTIFIER);
 
     // The context must be locked since changing the buffer can re-configure the number of channels that are output.
-    AudioContextBase::AutoLocker contextLocker(context());
+    BaseAudioContext::AutoLocker contextLocker(context());
     
     // This synchronizes with process().
     auto locker = holdLock(m_processMutex);
Source/WebCore/Modules/webaudio/AudioBufferSourceNode.h
index 6011311..9c7e1b9 100644
@@ -39,7 +39,7 @@ class PannerNodeBase;
 class AudioBufferSourceNode final : public AudioScheduledSourceNode {
     WTF_MAKE_ISO_ALLOCATED(AudioBufferSourceNode);
 public:
-    static Ref<AudioBufferSourceNode> create(AudioContextBase&, float sampleRate);
+    static Ref<AudioBufferSourceNode> create(BaseAudioContext&, float sampleRate);
 
     virtual ~AudioBufferSourceNode();
 
@@ -91,7 +91,7 @@ public:
     const char* activeDOMObjectName() const override { return "AudioBufferSourceNode"; }
 
 private:
-    AudioBufferSourceNode(AudioContextBase&, float sampleRate);
+    AudioBufferSourceNode(BaseAudioContext&, float sampleRate);
 
     double tailTime() const final { return 0; }
     double latencyTime() const final { return 0; }
Source/WebCore/Modules/webaudio/AudioContext.cpp
index 383e1fd..27e34e6 100644
@@ -29,6 +29,7 @@
 
 #include "AudioContext.h"
 #include "JSDOMPromiseDeferred.h"
+#include <wtf/IsoMallocInlines.h>
 
 #if ENABLE(MEDIA_STREAM)
 #include "MediaStream.h"
Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp
index 677617e..100c46f 100644
@@ -39,7 +39,7 @@ namespace WebCore {
     
 WTF_MAKE_ISO_ALLOCATED_IMPL(AudioDestinationNode);
 
-AudioDestinationNode::AudioDestinationNode(AudioContextBase& context, float sampleRate)
+AudioDestinationNode::AudioDestinationNode(BaseAudioContext& context, float sampleRate)
     : AudioNode(context, sampleRate)
     , m_currentSampleFrame(0)
     , m_isSilent(true)
Source/WebCore/Modules/webaudio/AudioDestinationNode.h
index 376f462..25b4515 100644
@@ -36,7 +36,7 @@ class AudioContext;
 class AudioDestinationNode : public AudioNode, public AudioIOCallback {
     WTF_MAKE_ISO_ALLOCATED(AudioDestinationNode);
 public:
-    AudioDestinationNode(AudioContextBase&, float sampleRate);
+    AudioDestinationNode(BaseAudioContext&, float sampleRate);
     virtual ~AudioDestinationNode();
     
     // AudioNode   
Source/WebCore/Modules/webaudio/AudioNode.cpp
index 22e0546..7a76219 100644
@@ -96,7 +96,7 @@ String convertEnumerationToString(AudioNode::NodeType enumerationValue)
     return values[static_cast<size_t>(enumerationValue)];
 }
 
-AudioNode::AudioNode(AudioContextBase& context, float sampleRate)
+AudioNode::AudioNode(BaseAudioContext& context, float sampleRate)
     : m_isInitialized(false)
     , m_nodeType(NodeTypeUnknown)
     , m_context(context)
@@ -195,7 +195,7 @@ AudioNodeOutput* AudioNode::output(unsigned i)
 ExceptionOr<void> AudioNode::connect(AudioNode& destination, unsigned outputIndex, unsigned inputIndex)
 {
     ASSERT(isMainThread());
-    AudioContextBase::AutoLocker locker(context());
+    BaseAudioContext::AutoLocker locker(context());
 
     ALWAYS_LOG(LOGIDENTIFIER, destination.nodeType(), ", output = ", outputIndex, ", input = ", inputIndex);
     
@@ -221,7 +221,7 @@ ExceptionOr<void> AudioNode::connect(AudioNode& destination, unsigned outputInde
 
 ExceptionOr<void> AudioNode::connect(AudioParam& param, unsigned outputIndex)
 {
-    AudioContextBase::AutoLocker locker(context());
+    BaseAudioContext::AutoLocker locker(context());
 
     ASSERT(isMainThread());
 
@@ -242,7 +242,7 @@ ExceptionOr<void> AudioNode::connect(AudioParam& param, unsigned outputIndex)
 ExceptionOr<void> AudioNode::disconnect(unsigned outputIndex)
 {
     ASSERT(isMainThread());
-    AudioContextBase::AutoLocker locker(context());
+    BaseAudioContext::AutoLocker locker(context());
 
     // Sanity check input and output indices.
     if (outputIndex >= numberOfOutputs())
@@ -264,7 +264,7 @@ unsigned AudioNode::channelCount()
 ExceptionOr<void> AudioNode::setChannelCount(unsigned channelCount)
 {
     ASSERT(isMainThread());
-    AudioContextBase::AutoLocker locker(context());
+    BaseAudioContext::AutoLocker locker(context());
 
     ALWAYS_LOG(LOGIDENTIFIER, channelCount);
     
@@ -297,7 +297,7 @@ String AudioNode::channelCountMode()
 ExceptionOr<void> AudioNode::setChannelCountMode(const String& mode)
 {
     ASSERT(isMainThread());
-    AudioContextBase::AutoLocker locker(context());
+    BaseAudioContext::AutoLocker locker(context());
 
     ALWAYS_LOG(LOGIDENTIFIER, mode);
     
@@ -333,7 +333,7 @@ String AudioNode::channelInterpretation()
 ExceptionOr<void> AudioNode::setChannelInterpretation(const String& interpretation)
 {
     ASSERT(isMainThread());
-    AudioContextBase::AutoLocker locker(context());
+    BaseAudioContext::AutoLocker locker(context());
 
     ALWAYS_LOG(LOGIDENTIFIER, interpretation);
     
@@ -438,7 +438,7 @@ void AudioNode::enableOutputsIfNecessary()
 {
     if (m_isDisabled && m_connectionRefCount > 0) {
         ASSERT(isMainThread());
-        AudioContextBase::AutoLocker locker(context());
+        BaseAudioContext::AutoLocker locker(context());
 
         m_isDisabled = false;
         for (auto& output : m_outputs)
@@ -535,7 +535,7 @@ Variant<RefPtr<BaseAudioContext>, RefPtr<WebKitAudioContext>> AudioNode::context
 {
     if (m_context->isWebKitAudioContext())
         return makeRefPtr(static_cast<WebKitAudioContext&>(m_context.get()));
-    return makeRefPtr(static_cast<BaseAudioContext&>(m_context.get()));
+    return makeRefPtr(m_context.get());
 }
 
 void AudioNode::finishDeref(RefType refType)
Source/WebCore/Modules/webaudio/AudioNode.h
index bfd917b..328fd18 100644
@@ -35,7 +35,7 @@
 
 namespace WebCore {
 
-class AudioContextBase;
+class BaseAudioContext;
 class AudioNodeInput;
 class AudioNodeOutput;
 class AudioParam;
@@ -59,11 +59,11 @@ class AudioNode
 public:
     enum { ProcessingSizeInFrames = 128 };
 
-    AudioNode(AudioContextBase&, float sampleRate);
+    AudioNode(BaseAudioContext&, float sampleRate);
     virtual ~AudioNode();
 
-    AudioContextBase& context() { return m_context.get(); }
-    const AudioContextBase& context() const { return m_context.get(); }
+    BaseAudioContext& context() { return m_context.get(); }
+    const BaseAudioContext& context() const { return m_context.get(); }
 
     Variant<RefPtr<BaseAudioContext>, RefPtr<WebKitAudioContext>> contextForBindings() const;
 
@@ -214,7 +214,7 @@ private:
 
     volatile bool m_isInitialized;
     NodeType m_nodeType;
-    Ref<AudioContextBase> m_context;
+    Ref<BaseAudioContext> m_context;
     float m_sampleRate;
     Vector<std::unique_ptr<AudioNodeInput>> m_inputs;
     Vector<std::unique_ptr<AudioNodeOutput>> m_outputs;
Source/WebCore/Modules/webaudio/AudioNodeOutput.h
index f089836..2c7c0cb 100644
@@ -47,7 +47,7 @@ public:
 
     // Can be called from any thread.
     AudioNode* node() const { return m_node; }
-    AudioContextBase& context() { return m_node->context(); }
+    BaseAudioContext& context() { return m_node->context(); }
     
     // Causes our AudioNode to process if it hasn't already for this render quantum.
     // It returns the bus containing the processed audio for this output, returning inPlaceBus if in-place processing was possible.
Source/WebCore/Modules/webaudio/AudioParam.cpp
index d470bf7..a51655e 100644
@@ -41,7 +41,7 @@ namespace WebCore {
 const double AudioParam::DefaultSmoothingConstant = 0.05;
 const double AudioParam::SnapThreshold = 0.001;
 
-AudioParam::AudioParam(AudioContextBase& context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units)
+AudioParam::AudioParam(BaseAudioContext& context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units)
     : AudioSummingJunction(context)
     , m_name(name)
     , m_value(defaultValue)
Source/WebCore/Modules/webaudio/AudioParam.h
index 16a2109..e3a33db 100644
@@ -53,7 +53,7 @@ public:
     static const double DefaultSmoothingConstant;
     static const double SnapThreshold;
 
-    static Ref<AudioParam> create(AudioContextBase& context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
+    static Ref<AudioParam> create(BaseAudioContext& context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
     {
         return adoptRef(*new AudioParam(context, name, defaultValue, minValue, maxValue, units));
     }
@@ -109,7 +109,7 @@ public:
     void disconnect(AudioNodeOutput*);
 
 protected:
-    AudioParam(AudioContextBase&, const String&, double defaultValue, double minValue, double maxValue, unsigned units = 0);
+    AudioParam(BaseAudioContext&, const String&, double defaultValue, double minValue, double maxValue, unsigned units = 0);
 
 private:
     // sampleAccurate corresponds to a-rate (audio rate) vs. k-rate in the Web Audio specification.
Source/WebCore/Modules/webaudio/AudioParamTimeline.cpp
index 6c41301..30ada2d 100644
@@ -113,7 +113,7 @@ void AudioParamTimeline::cancelScheduledValues(float startTime)
     }
 }
 
-float AudioParamTimeline::valueForContextTime(AudioContextBase& context, float defaultValue, bool& hasValue)
+float AudioParamTimeline::valueForContextTime(BaseAudioContext& context, float defaultValue, bool& hasValue)
 {
     {
         std::unique_lock<Lock> lock(m_eventsMutex, std::try_to_lock);
Source/WebCore/Modules/webaudio/AudioParamTimeline.h
index a5868bd..8b8f0b5 100644
@@ -52,7 +52,7 @@ public:
 
     // hasValue is set to true if a valid timeline value is returned.
     // otherwise defaultValue is returned.
-    float valueForContextTime(AudioContextBase&, float defaultValue, bool& hasValue);
+    float valueForContextTime(BaseAudioContext&, float defaultValue, bool& hasValue);
 
     // Given the time range, calculates parameter values into the values buffer
     // and returns the last parameter value calculated for "values" or the defaultValue if none were calculated.
Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp
index 7c28237..76c9693 100644
@@ -49,7 +49,7 @@ WTF_MAKE_ISO_ALLOCATED_IMPL(AudioScheduledSourceNode);
 
 const double AudioScheduledSourceNode::UnknownTime = -1;
 
-AudioScheduledSourceNode::AudioScheduledSourceNode(AudioContextBase& context, float sampleRate)
+AudioScheduledSourceNode::AudioScheduledSourceNode(BaseAudioContext& context, float sampleRate)
     : AudioNode(context, sampleRate)
     , ActiveDOMObject(context.scriptExecutionContext())
     , m_endTime(UnknownTime)
Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.h
index b4d9588..60b3878 100644
@@ -53,7 +53,7 @@ public:
         FINISHED_STATE = 3
     };
     
-    AudioScheduledSourceNode(AudioContextBase&, float sampleRate);
+    AudioScheduledSourceNode(BaseAudioContext&, float sampleRate);
 
     ExceptionOr<void> startLater(double when);
     ExceptionOr<void> stopLater(double when);
Source/WebCore/Modules/webaudio/AudioSummingJunction.cpp
index dd64fd3..736ce6b 100644
@@ -34,7 +34,7 @@
 
 namespace WebCore {
 
-AudioSummingJunction::AudioSummingJunction(AudioContextBase& context)
+AudioSummingJunction::AudioSummingJunction(BaseAudioContext& context)
     : m_context(context)
     , m_renderingStateNeedUpdating(false)
 {
Source/WebCore/Modules/webaudio/AudioSummingJunction.h
index 4cbf8ae..5671f70 100644
 
 namespace WebCore {
 
-class AudioContextBase;
+class BaseAudioContext;
 class AudioNodeOutput;
 
 // An AudioSummingJunction represents a point where zero, one, or more AudioNodeOutputs connect.
 
 class AudioSummingJunction {
 public:
-    explicit AudioSummingJunction(AudioContextBase&);
+    explicit AudioSummingJunction(BaseAudioContext&);
     virtual ~AudioSummingJunction();
 
     // Can be called from any thread.
-    AudioContextBase& context() { return m_context; }
+    BaseAudioContext& context() { return m_context; }
 
     // This must be called whenever we modify m_outputs.
     void changedOutputs();
@@ -60,7 +60,7 @@ public:
     virtual void didUpdate() = 0;
 
 protected:
-    Ref<AudioContextBase> m_context;
+    Ref<BaseAudioContext> m_context;
 
     // m_outputs contains the AudioNodeOutputs representing current connections which are not disabled.
     // The rendering code should never use this directly, but instead uses m_renderingOutputs.
Source/WebCore/Modules/webaudio/BaseAudioContext.cpp
index c81fe9a..cbec6e1 100644
@@ -95,7 +95,6 @@ const unsigned MaxPeriodicWaveLength = 4096;
 
 namespace WebCore {
 
-WTF_MAKE_ISO_ALLOCATED_IMPL(AudioContextBase);
 WTF_MAKE_ISO_ALLOCATED_IMPL(BaseAudioContext);
 
 #define RELEASE_LOG_IF_ALLOWED(fmt, ...) RELEASE_LOG_IF(document() && document()->page() && document()->page()->isAlwaysOnLoggingAllowed(), Media, "%p - BaseAudioContext::" fmt, this, ##__VA_ARGS__)
@@ -109,14 +108,9 @@ bool BaseAudioContext::isSampleRateRangeGood(float sampleRate)
 
 unsigned BaseAudioContext::s_hardwareContextCount = 0;
 
-AudioContextBase::AudioContextBase(Document& document)
-    : ActiveDOMObject(document)
-{
-}
-
 // Constructor for rendering to the audio hardware.
 BaseAudioContext::BaseAudioContext(Document& document)
-    : AudioContextBase(document)
+    : ActiveDOMObject(document)
 #if !RELEASE_LOG_DISABLED
     , m_logger(document.logger())
     , m_logIdentifier(uniqueLogIdentifier())
@@ -141,7 +135,7 @@ BaseAudioContext::BaseAudioContext(Document& document)
 
 // Constructor for offline (non-realtime) rendering.
 BaseAudioContext::BaseAudioContext(Document& document, AudioBuffer* renderTarget)
-    : AudioContextBase(document)
+    : ActiveDOMObject(document)
 #if !RELEASE_LOG_DISABLED
     , m_logger(document.logger())
     , m_logIdentifier(uniqueLogIdentifier())
@@ -343,7 +337,7 @@ const char* BaseAudioContext::activeDOMObjectName() const
     return "AudioContext";
 }
 
-Document* AudioContextBase::document() const
+Document* BaseAudioContext::document() const
 {
     return downcast<Document>(m_scriptExecutionContext);
 }
@@ -981,7 +975,7 @@ void BaseAudioContext::processAutomaticPullNodes(size_t framesToProcess)
         node->processIfNecessary(framesToProcess);
 }
 
-ScriptExecutionContext* AudioContextBase::scriptExecutionContext() const
+ScriptExecutionContext* BaseAudioContext::scriptExecutionContext() const
 {
     return ActiveDOMObject::scriptExecutionContext();
 }
Source/WebCore/Modules/webaudio/BaseAudioContext.h
index 70d4752..50aa070 100644
@@ -80,147 +80,45 @@ class WaveShaperNode;
 
 template<typename IDLType> class DOMPromiseDeferred;
 
-// FIXME: We need to rename this now that there is also BaseAudioContext.
-class AudioContextBase
+// AudioContext is the cornerstone of the web audio API and all AudioNodes are created from it.
+// For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism. 
+
+class BaseAudioContext
     : public ActiveDOMObject
-    , public ThreadSafeRefCounted<AudioContextBase>
+    , public ThreadSafeRefCounted<BaseAudioContext>
     , public EventTargetWithInlineData
     , public MediaCanStartListener
     , public MediaProducer
 #if !RELEASE_LOG_DISABLED
     , public LoggerHelper
 #endif
+    , private PlatformMediaSessionClient
+    , private VisibilityChangeClient
 {
-    WTF_MAKE_ISO_ALLOCATED(AudioContextBase);
+    WTF_MAKE_ISO_ALLOCATED(BaseAudioContext);
 public:
-    virtual ~AudioContextBase() = default;
+    virtual ~BaseAudioContext();
 
     // Reconcile ref/deref which are defined both in ThreadSafeRefCounted and EventTarget.
     using ThreadSafeRefCounted::ref;
     using ThreadSafeRefCounted::deref;
 
     Document* document() const;
-
-    virtual bool isInitialized() const = 0;
-
-    virtual size_t currentSampleFrame() const = 0;
-    virtual float sampleRate() const = 0;
-    virtual double currentTime() const = 0;
-    virtual bool isGraphOwner() const = 0;
-
-    virtual void setAudioThread(Thread&) = 0;
-    virtual bool isAudioThread() const = 0;
-    virtual bool isAudioThreadFinished() = 0;
-
-    virtual void isPlayingAudioDidChange() = 0;
-    virtual void nodeWillBeginPlayback() = 0;
-
-    virtual void postTask(WTF::Function<void()>&&) = 0;
-    virtual bool isStopped() const = 0;
-    virtual const SecurityOrigin* origin() const = 0;
-    virtual void addConsoleMessage(MessageSource, MessageLevel, const String& message) = 0;
-
-    virtual void markForDeletion(AudioNode&) = 0;
-    virtual void deleteMarkedNodes() = 0;
-
-    virtual void handlePreRenderTasks() = 0;
-    virtual void handlePostRenderTasks() = 0;
-    virtual void processAutomaticPullNodes(size_t framesToProcess) = 0;
-    virtual void addDeferredFinishDeref(AudioNode*) = 0;
-
-    virtual void removeMarkedSummingJunction(AudioSummingJunction*) = 0;
-    virtual void markSummingJunctionDirty(AudioSummingJunction*) = 0;
-    virtual void markAudioNodeOutputDirty(AudioNodeOutput*) = 0;
-
-    enum BehaviorRestrictionFlags {
-        NoRestrictions = 0,
-        RequireUserGestureForAudioStartRestriction = 1 << 0,
-        RequirePageConsentForAudioStartRestriction = 1 << 1,
-    };
-    typedef unsigned BehaviorRestrictions;
-    virtual BehaviorRestrictions behaviorRestrictions() const = 0;
-    virtual void addBehaviorRestriction(BehaviorRestrictions) = 0;
-    virtual void removeBehaviorRestriction(BehaviorRestrictions) = 0;
-
-#if !RELEASE_LOG_DISABLED
-    virtual const void* nextAudioNodeLogIdentifier() = 0;
-    virtual const void* nextAudioParameterLogIdentifier() = 0;
-#endif
-
-    virtual void addAutomaticPullNode(AudioNode&) = 0;
-    virtual void removeAutomaticPullNode(AudioNode&) = 0;
-
-    virtual void notifyNodeFinishedProcessing(AudioNode*) = 0;
-
-    virtual void finishedRendering(bool didRendering) = 0;
-
-    virtual void incrementConnectionCount() = 0;
-    virtual void incrementActiveSourceCount() = 0;
-    virtual void decrementActiveSourceCount() = 0;
-
-    virtual bool isOfflineContext() const = 0;
-    virtual bool isBaseAudioContext() const = 0;
-    virtual bool isWebKitAudioContext() const = 0;
-
-    // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
-    virtual void lock(bool& mustReleaseLock) = 0;
-    virtual bool tryLock(bool& mustReleaseLock) = 0;
-    virtual void unlock() = 0;
-
-    class AutoLocker {
-    public:
-        explicit AutoLocker(AudioContextBase& context)
-            : m_context(context)
-        {
-            m_context.lock(m_mustReleaseLock);
-        }
-
-        ~AutoLocker()
-        {
-            if (m_mustReleaseLock)
-                m_context.unlock();
-        }
-
-    private:
-        AudioContextBase& m_context;
-        bool m_mustReleaseLock;
-    };
-
-    // EventTarget
-    ScriptExecutionContext* scriptExecutionContext() const final;
-    void refEventTarget() override { ref(); }
-    void derefEventTarget() override { deref(); }
-
-protected:
-    explicit AudioContextBase(Document&);
-};
-
-// AudioContext is the cornerstone of the web audio API and all AudioNodes are created from it.
-// For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism. 
-
-class BaseAudioContext
-    : public AudioContextBase
-    , private PlatformMediaSessionClient
-    , private VisibilityChangeClient
-{
-    WTF_MAKE_ISO_ALLOCATED(BaseAudioContext);
-public:
-    virtual ~BaseAudioContext();
-
-    bool isInitialized() const final;
+    bool isInitialized() const;
     
-    bool isOfflineContext() const final { return m_isOfflineContext; }
+    bool isOfflineContext() const { return m_isOfflineContext; }
+    virtual bool isWebKitAudioContext() const { return false; }
 
     DocumentIdentifier hostingDocumentIdentifier() const final;
 
     AudioDestinationNode* destination() { return m_destinationNode.get(); }
-    size_t currentSampleFrame() const final { return m_destinationNode ? m_destinationNode->currentSampleFrame() : 0; }
-    double currentTime() const final { return m_destinationNode ? m_destinationNode->currentTime() : 0.; }
-    float sampleRate() const final { return m_destinationNode ? m_destinationNode->sampleRate() : 0.f; }
+    size_t currentSampleFrame() const { return m_destinationNode ? m_destinationNode->currentSampleFrame() : 0; }
+    double currentTime() const { return m_destinationNode ? m_destinationNode->currentTime() : 0.; }
+    float sampleRate() const { return m_destinationNode ? m_destinationNode->sampleRate() : 0.f; }
     unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }
 
-    void incrementActiveSourceCount() final;
-    void decrementActiveSourceCount() final;
+    void incrementActiveSourceCount();
+    void decrementActiveSourceCount();
     
     ExceptionOr<Ref<AudioBuffer>> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
     ExceptionOr<Ref<AudioBuffer>> createBuffer(ArrayBuffer&, bool mixToMono);
@@ -256,31 +154,31 @@ public:
     ExceptionOr<Ref<PeriodicWave>> createPeriodicWave(Float32Array& real, Float32Array& imaginary);
 
     // When a source node has no more processing to do (has finished playing), then it tells the context to dereference it.
-    void notifyNodeFinishedProcessing(AudioNode*) final;
+    void notifyNodeFinishedProcessing(AudioNode*);
 
     // Called at the start of each render quantum.
-    void handlePreRenderTasks() final;
+    void handlePreRenderTasks();
 
     // Called at the end of each render quantum.
-    void handlePostRenderTasks() final;
+    void handlePostRenderTasks();
 
     // Called periodically at the end of each render quantum to dereference finished source nodes.
     void derefFinishedSourceNodes();
 
     // We schedule deletion of all marked nodes at the end of each realtime render quantum.
-    void markForDeletion(AudioNode&) final;
-    void deleteMarkedNodes() final;
+    void markForDeletion(AudioNode&);
+    void deleteMarkedNodes();
 
     // AudioContext can pull node(s) at the end of each render quantum even when they are not connected to any downstream nodes.
     // These two methods are called by the nodes who want to add/remove themselves into/from the automatic pull lists.
-    void addAutomaticPullNode(AudioNode&) final;
-    void removeAutomaticPullNode(AudioNode&) final;
+    void addAutomaticPullNode(AudioNode&);
+    void removeAutomaticPullNode(AudioNode&);
 
     // Called right before handlePostRenderTasks() to handle nodes which need to be pulled even when they are not connected to anything.
-    void processAutomaticPullNodes(size_t framesToProcess) final;
+    void processAutomaticPullNodes(size_t framesToProcess);
 
     // Keeps track of the number of connections made.
-    void incrementConnectionCount() final
+    void incrementConnectionCount()
     {
         ASSERT(isMainThread());
         m_connectionCount++;
@@ -292,70 +190,98 @@ public:
     // Thread Safety and Graph Locking:
     //
     
-    void setAudioThread(Thread& thread) final { m_audioThread = &thread; } // FIXME: check either not initialized or the same
+    void setAudioThread(Thread& thread) { m_audioThread = &thread; } // FIXME: check either not initialized or the same
     Thread* audioThread() const { return m_audioThread; }
-    bool isAudioThread() const final;
+    bool isAudioThread() const;
 
     // Returns true only after the audio thread has been started and then shutdown.
-    bool isAudioThreadFinished() final { return m_isAudioThreadFinished; }
+    bool isAudioThreadFinished() { return m_isAudioThreadFinished; }
     
     // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
-    void lock(bool& mustReleaseLock) final;
+    void lock(bool& mustReleaseLock);
 
     // Returns true if we own the lock.
     // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
-    bool tryLock(bool& mustReleaseLock) final;
+    bool tryLock(bool& mustReleaseLock);
 
-    void unlock() final;
+    void unlock();
 
     // Returns true if this thread owns the context's lock.
-    bool isGraphOwner() const final;
+    bool isGraphOwner() const;
 
     // Returns the maximum number of channels we can support.
     static unsigned maxNumberOfChannels() { return MaxNumberOfChannels; }
     
     // In AudioNode::deref() a tryLock() is used for calling finishDeref(), but if it fails keep track here.
-    void addDeferredFinishDeref(AudioNode*) final;
+    void addDeferredFinishDeref(AudioNode*);
 
     // In the audio thread at the start of each render cycle, we'll call handleDeferredFinishDerefs().
     void handleDeferredFinishDerefs();
 
     // Only accessed when the graph lock is held.
-    void markSummingJunctionDirty(AudioSummingJunction*) final;
-    void markAudioNodeOutputDirty(AudioNodeOutput*) final;
+    void markSummingJunctionDirty(AudioSummingJunction*);
+    void markAudioNodeOutputDirty(AudioNodeOutput*);
 
     // Must be called on main thread.
-    void removeMarkedSummingJunction(AudioSummingJunction*) final;
+    void removeMarkedSummingJunction(AudioSummingJunction*);
 
     // EventTarget
     EventTargetInterface eventTargetInterface() const final;
+    ScriptExecutionContext* scriptExecutionContext() const final;
+    void refEventTarget() override { ref(); }
+    void derefEventTarget() override { deref(); }
 
     void startRendering();
-    void finishedRendering(bool didRendering) final;
+    void finishedRendering(bool didRendering);
 
     static unsigned s_hardwareContextCount;
 
     // Restrictions to change default behaviors.
-    BehaviorRestrictions behaviorRestrictions() const final { return m_restrictions; }
-    void addBehaviorRestriction(BehaviorRestrictions restriction) final { m_restrictions |= restriction; }
-    void removeBehaviorRestriction(BehaviorRestrictions restriction) final { m_restrictions &= ~restriction; }
+    enum BehaviorRestrictionFlags {
+        NoRestrictions = 0,
+        RequireUserGestureForAudioStartRestriction = 1 << 0,
+        RequirePageConsentForAudioStartRestriction = 1 << 1,
+    };
+    typedef unsigned BehaviorRestrictions;
+    BehaviorRestrictions behaviorRestrictions() const { return m_restrictions; }
+    void addBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions |= restriction; }
+    void removeBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions &= ~restriction; }
 
-    void isPlayingAudioDidChange() final;
+    void isPlayingAudioDidChange();
 
-    void nodeWillBeginPlayback() final;
+    void nodeWillBeginPlayback();
 
 #if !RELEASE_LOG_DISABLED
     const Logger& logger() const final { return m_logger.get(); }
     const void* logIdentifier() const final { return m_logIdentifier; }
     WTFLogChannel& logChannel() const final;
-    const void* nextAudioNodeLogIdentifier() final { return childLogIdentifier(m_logIdentifier, ++m_nextAudioNodeIdentifier); }
-    const void* nextAudioParameterLogIdentifier() final { return childLogIdentifier(m_logIdentifier, ++m_nextAudioParameterIdentifier); }
+    const void* nextAudioNodeLogIdentifier() { return childLogIdentifier(m_logIdentifier, ++m_nextAudioNodeIdentifier); }
+    const void* nextAudioParameterLogIdentifier() { return childLogIdentifier(m_logIdentifier, ++m_nextAudioParameterIdentifier); }
 #endif
 
-    void postTask(WTF::Function<void()>&&) final;
-    bool isStopped() const final { return m_isStopScheduled; }
-    const SecurityOrigin* origin() const final;
-    void addConsoleMessage(MessageSource, MessageLevel, const String& message) final;
+    void postTask(WTF::Function<void()>&&);
+    bool isStopped() const { return m_isStopScheduled; }
+    const SecurityOrigin* origin() const;
+    void addConsoleMessage(MessageSource, MessageLevel, const String& message);
+
+    class AutoLocker {
+    public:
+        explicit AutoLocker(BaseAudioContext& context)
+            : m_context(context)
+        {
+            m_context.lock(m_mustReleaseLock);
+        }
+
+        ~AutoLocker()
+        {
+            if (m_mustReleaseLock)
+                m_context.unlock();
+        }
+
+    private:
+        BaseAudioContext& m_context;
+        bool m_mustReleaseLock;
+    };
 
 protected:
     explicit BaseAudioContext(Document&);
@@ -430,9 +356,6 @@ private:
 
     void visibilityStateChanged() final;
 
-    bool isBaseAudioContext() const final { return true; }
-    bool isWebKitAudioContext() const final { return false; }
-
     void handleDirtyAudioSummingJunctions();
     void handleDirtyAudioNodeOutputs();
 
@@ -513,7 +436,3 @@ private:
 };
 
 } // WebCore
-
-SPECIALIZE_TYPE_TRAITS_BEGIN(WebCore::BaseAudioContext)
-    static bool isType(const WebCore::AudioContextBase& context) { return context.isBaseAudioContext(); }
-SPECIALIZE_TYPE_TRAITS_END()
Source/WebCore/Modules/webaudio/BiquadFilterNode.cpp
index dde0773..bf5488d 100644
@@ -33,7 +33,7 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(BiquadFilterNode);
 
-BiquadFilterNode::BiquadFilterNode(AudioContextBase& context, float sampleRate)
+BiquadFilterNode::BiquadFilterNode(BaseAudioContext& context, float sampleRate)
     : AudioBasicProcessorNode(context, sampleRate)
 {
     setNodeType(NodeTypeBiquadFilter);
Source/WebCore/Modules/webaudio/BiquadFilterNode.h
index ac8d947..8d789a3 100644
@@ -34,7 +34,7 @@ class AudioParam;
 class BiquadFilterNode final : public AudioBasicProcessorNode {
     WTF_MAKE_ISO_ALLOCATED(BiquadFilterNode);
 public:
-    static Ref<BiquadFilterNode> create(AudioContextBase& context, float sampleRate)
+    static Ref<BiquadFilterNode> create(BaseAudioContext& context, float sampleRate)
     {
         return adoptRef(*new BiquadFilterNode(context, sampleRate));
     }
@@ -52,7 +52,7 @@ public:
     void getFrequencyResponse(const RefPtr<Float32Array>& frequencyHz, const RefPtr<Float32Array>& magResponse, const RefPtr<Float32Array>& phaseResponse);
 
 private:
-    BiquadFilterNode(AudioContextBase&, float sampleRate);
+    BiquadFilterNode(BaseAudioContext&, float sampleRate);
 
     BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); }
 };
Source/WebCore/Modules/webaudio/BiquadProcessor.cpp
index 2f6067a..d9ceba8 100644
@@ -32,7 +32,7 @@
 
 namespace WebCore {
     
-BiquadProcessor::BiquadProcessor(AudioContextBase& context, float sampleRate, size_t numberOfChannels, bool autoInitialize)
+BiquadProcessor::BiquadProcessor(BaseAudioContext& context, float sampleRate, size_t numberOfChannels, bool autoInitialize)
     : AudioDSPKernelProcessor(sampleRate, numberOfChannels)
     , m_type(BiquadFilterType::Lowpass)
     , m_parameter1(0)
Source/WebCore/Modules/webaudio/BiquadProcessor.h
index dc306ab..cb15288 100644
@@ -50,7 +50,7 @@ enum class BiquadFilterType {
 class BiquadProcessor final : public AudioDSPKernelProcessor {
     WTF_MAKE_FAST_ALLOCATED;
 public:
-    BiquadProcessor(AudioContextBase&, float sampleRate, size_t numberOfChannels, bool autoInitialize);
+    BiquadProcessor(BaseAudioContext&, float sampleRate, size_t numberOfChannels, bool autoInitialize);
 
     virtual ~BiquadProcessor();
     
Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp
index de28aea..5779415 100644
@@ -43,7 +43,7 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(ChannelMergerNode);
 
-RefPtr<ChannelMergerNode> ChannelMergerNode::create(AudioContextBase& context, float sampleRate, unsigned numberOfInputs)
+RefPtr<ChannelMergerNode> ChannelMergerNode::create(BaseAudioContext& context, float sampleRate, unsigned numberOfInputs)
 {
     if (!numberOfInputs || numberOfInputs > AudioContext::maxNumberOfChannels())
         return nullptr;
@@ -51,7 +51,7 @@ RefPtr<ChannelMergerNode> ChannelMergerNode::create(AudioContextBase& context, f
     return adoptRef(*new ChannelMergerNode(context, sampleRate, numberOfInputs));      
 }
 
-ChannelMergerNode::ChannelMergerNode(AudioContextBase& context, float sampleRate, unsigned numberOfInputs)
+ChannelMergerNode::ChannelMergerNode(BaseAudioContext& context, float sampleRate, unsigned numberOfInputs)
     : AudioNode(context, sampleRate)
     , m_desiredNumberOfOutputChannels(DefaultNumberOfOutputChannels)
 {
Source/WebCore/Modules/webaudio/ChannelMergerNode.h
index 62976e0..5afc329 100644
@@ -37,7 +37,7 @@ class AudioContext;
 class ChannelMergerNode final : public AudioNode {
     WTF_MAKE_ISO_ALLOCATED(ChannelMergerNode);
 public:
-    static RefPtr<ChannelMergerNode> create(AudioContextBase&, float sampleRate, unsigned numberOfInputs);
+    static RefPtr<ChannelMergerNode> create(BaseAudioContext&, float sampleRate, unsigned numberOfInputs);
 
     // AudioNode
     void process(size_t framesToProcess) override;
@@ -52,7 +52,7 @@ private:
     double tailTime() const override { return 0; }
     double latencyTime() const override { return 0; }
 
-    ChannelMergerNode(AudioContextBase&, float sampleRate, unsigned numberOfInputs);
+    ChannelMergerNode(BaseAudioContext&, float sampleRate, unsigned numberOfInputs);
 };
 
 } // namespace WebCore
Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp
index f5553d0..379f1ff 100644
@@ -37,7 +37,7 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(ChannelSplitterNode);
     
-RefPtr<ChannelSplitterNode> ChannelSplitterNode::create(AudioContextBase& context, float sampleRate, unsigned numberOfOutputs)
+RefPtr<ChannelSplitterNode> ChannelSplitterNode::create(BaseAudioContext& context, float sampleRate, unsigned numberOfOutputs)
 {
     if (!numberOfOutputs || numberOfOutputs > AudioContext::maxNumberOfChannels())
         return nullptr;
@@ -45,7 +45,7 @@ RefPtr<ChannelSplitterNode> ChannelSplitterNode::create(AudioContextBase& contex
     return adoptRef(*new ChannelSplitterNode(context, sampleRate, numberOfOutputs));      
 }
 
-ChannelSplitterNode::ChannelSplitterNode(AudioContextBase& context, float sampleRate, unsigned numberOfOutputs)
+ChannelSplitterNode::ChannelSplitterNode(BaseAudioContext& context, float sampleRate, unsigned numberOfOutputs)
     : AudioNode(context, sampleRate)
 {
     setNodeType(NodeTypeChannelSplitter);
Source/WebCore/Modules/webaudio/ChannelSplitterNode.h
index 13d8d98..442844d 100644
@@ -33,7 +33,7 @@ class AudioContext;
 class ChannelSplitterNode final : public AudioNode {
     WTF_MAKE_ISO_ALLOCATED(ChannelSplitterNode);
 public:
-    static RefPtr<ChannelSplitterNode> create(AudioContextBase&, float sampleRate, unsigned numberOfOutputs);
+    static RefPtr<ChannelSplitterNode> create(BaseAudioContext&, float sampleRate, unsigned numberOfOutputs);
 
     // AudioNode
     void process(size_t framesToProcess) override;
@@ -43,7 +43,7 @@ private:
     double tailTime() const override { return 0; }
     double latencyTime() const override { return 0; }
 
-    ChannelSplitterNode(AudioContextBase&, float sampleRate, unsigned numberOfOutputs);
+    ChannelSplitterNode(BaseAudioContext&, float sampleRate, unsigned numberOfOutputs);
 };
 
 } // namespace WebCore
index e1f5eb8..7e138d4 100644
@@ -47,7 +47,7 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(ConvolverNode);
 
-ConvolverNode::ConvolverNode(AudioContextBase& context, float sampleRate)
+ConvolverNode::ConvolverNode(BaseAudioContext& context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     setNodeType(NodeTypeConvolver);
index 31cec1e..fa87d2f 100644
@@ -35,7 +35,7 @@ class Reverb;
 class ConvolverNode final : public AudioNode {
     WTF_MAKE_ISO_ALLOCATED(ConvolverNode);
 public:
-    static Ref<ConvolverNode> create(AudioContextBase& context, float sampleRate)
+    static Ref<ConvolverNode> create(BaseAudioContext& context, float sampleRate)
     {
         return adoptRef(*new ConvolverNode(context, sampleRate));
     }
@@ -49,7 +49,7 @@ public:
     void setNormalize(bool normalize) { m_normalize = normalize; }
 
 private:
-    ConvolverNode(AudioContextBase&, float sampleRate);
+    ConvolverNode(BaseAudioContext&, float sampleRate);
 
     double tailTime() const final;
     double latencyTime() const final;
index 07eb9a4..4dd99c1 100644
@@ -43,7 +43,7 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(DefaultAudioDestinationNode);
     
-DefaultAudioDestinationNode::DefaultAudioDestinationNode(AudioContextBase& context)
+DefaultAudioDestinationNode::DefaultAudioDestinationNode(BaseAudioContext& context)
     : AudioDestinationNode(context, AudioDestination::hardwareSampleRate())
 {
     // Node-specific default mixing rules.
index 96c837c..3253971 100644
@@ -33,7 +33,7 @@ class AudioDestination;
 class DefaultAudioDestinationNode final : public AudioDestinationNode {
     WTF_MAKE_ISO_ALLOCATED(DefaultAudioDestinationNode);
 public:
-    static Ref<DefaultAudioDestinationNode> create(AudioContextBase& context)
+    static Ref<DefaultAudioDestinationNode> create(BaseAudioContext& context)
     {
         return adoptRef(*new DefaultAudioDestinationNode(context));     
     }
@@ -41,7 +41,7 @@ public:
     virtual ~DefaultAudioDestinationNode();
     
 private:
-    explicit DefaultAudioDestinationNode(AudioContextBase&);
+    explicit DefaultAudioDestinationNode(BaseAudioContext&);
     void createDestination();
 
     void initialize() final;
index 40527c8..3f29ab3 100644
@@ -37,14 +37,14 @@ WTF_MAKE_ISO_ALLOCATED_IMPL(DelayNode);
 
 const double maximumAllowedDelayTime = 180;
 
-inline DelayNode::DelayNode(AudioContextBase& context, float sampleRate, double maxDelayTime)
+inline DelayNode::DelayNode(BaseAudioContext& context, float sampleRate, double maxDelayTime)
     : AudioBasicProcessorNode(context, sampleRate)
 {
     setNodeType(NodeTypeDelay);
     m_processor = makeUnique<DelayProcessor>(context, sampleRate, 1, maxDelayTime);
 }
 
-ExceptionOr<Ref<DelayNode>> DelayNode::create(AudioContextBase& context, float sampleRate, double maxDelayTime)
+ExceptionOr<Ref<DelayNode>> DelayNode::create(BaseAudioContext& context, float sampleRate, double maxDelayTime)
 {
     if (maxDelayTime <= 0 || maxDelayTime >= maximumAllowedDelayTime)
         return Exception { NotSupportedError };
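
The validation above is untouched: a maxDelayTime outside the open interval (0, 180) seconds is rejected before construction. A hedged sketch of the resulting behavior (the context reference is hypothetical):

    // create() returns ExceptionOr, so range errors surface before the node exists.
    auto ok = DelayNode::create(context, context.sampleRate(), 5.0);      // inside (0, 180)
    auto tooLong = DelayNode::create(context, context.sampleRate(), 200.0);
    ASSERT(!ok.hasException());
    ASSERT(tooLong.hasException()); // NotSupportedError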
index b6afdd0..fb46917 100644
@@ -31,12 +31,12 @@ namespace WebCore {
 class DelayNode final : public AudioBasicProcessorNode {
     WTF_MAKE_ISO_ALLOCATED(DelayNode);
 public:
-    static ExceptionOr<Ref<DelayNode>> create(AudioContextBase&, float sampleRate, double maxDelayTime);
+    static ExceptionOr<Ref<DelayNode>> create(BaseAudioContext&, float sampleRate, double maxDelayTime);
 
     AudioParam* delayTime();
 
 private:
-    DelayNode(AudioContextBase&, float sampleRate, double maxDelayTime);
+    DelayNode(BaseAudioContext&, float sampleRate, double maxDelayTime);
 };
 
 } // namespace WebCore
index cbbfd9f..ef745d8 100644
@@ -32,7 +32,7 @@
 
 namespace WebCore {
 
-DelayProcessor::DelayProcessor(AudioContextBase& context, float sampleRate, unsigned numberOfChannels, double maxDelayTime)
+DelayProcessor::DelayProcessor(BaseAudioContext& context, float sampleRate, unsigned numberOfChannels, double maxDelayTime)
     : AudioDSPKernelProcessor(sampleRate, numberOfChannels)
     , m_maxDelayTime(maxDelayTime)
 {
index cbac79e..0e723ee 100644
@@ -36,7 +36,7 @@ class AudioDSPKernel;
 class DelayProcessor final : public AudioDSPKernelProcessor {
     WTF_MAKE_FAST_ALLOCATED;
 public:
-    DelayProcessor(AudioContextBase&, float sampleRate, unsigned numberOfChannels, double maxDelayTime);
+    DelayProcessor(BaseAudioContext&, float sampleRate, unsigned numberOfChannels, double maxDelayTime);
     virtual ~DelayProcessor();
     
     std::unique_ptr<AudioDSPKernel> createKernel() override;
index c33d1de..fda7acf 100644
@@ -41,7 +41,7 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(DynamicsCompressorNode);
 
-DynamicsCompressorNode::DynamicsCompressorNode(AudioContextBase& context, float sampleRate)
+DynamicsCompressorNode::DynamicsCompressorNode(BaseAudioContext& context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     setNodeType(NodeTypeDynamicsCompressor);
index b7053b2..c6d7556 100644
@@ -35,7 +35,7 @@ class DynamicsCompressor;
 class DynamicsCompressorNode final : public AudioNode {
     WTF_MAKE_ISO_ALLOCATED(DynamicsCompressorNode);
 public:
-    static Ref<DynamicsCompressorNode> create(AudioContextBase& context, float sampleRate)
+    static Ref<DynamicsCompressorNode> create(BaseAudioContext& context, float sampleRate)
     {
         return adoptRef(*new DynamicsCompressorNode(context, sampleRate));
     }
@@ -62,7 +62,7 @@ private:
     double tailTime() const override;
     double latencyTime() const override;
 
-    DynamicsCompressorNode(AudioContextBase&, float sampleRate);
+    DynamicsCompressorNode(BaseAudioContext&, float sampleRate);
 
     std::unique_ptr<DynamicsCompressor> m_dynamicsCompressor;
     RefPtr<AudioParam> m_threshold;
index 984d119..557e701 100644
@@ -37,7 +37,7 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(GainNode);
 
-GainNode::GainNode(AudioContextBase& context, float sampleRate)
+GainNode::GainNode(BaseAudioContext& context, float sampleRate)
     : AudioNode(context, sampleRate)
     , m_lastGain(1.0)
     , m_sampleAccurateGainValues(AudioNode::ProcessingSizeInFrames) // FIXME: can probably share temp buffer in context
index dcf855a..48371b7 100644
@@ -38,7 +38,7 @@ class AudioContext;
 class GainNode final : public AudioNode {
     WTF_MAKE_ISO_ALLOCATED(GainNode);
 public:
-    static Ref<GainNode> create(AudioContextBase& context, float sampleRate)
+    static Ref<GainNode> create(BaseAudioContext& context, float sampleRate)
     {
         return adoptRef(*new GainNode(context, sampleRate));
     }
@@ -57,7 +57,7 @@ private:
     double tailTime() const override { return 0; }
     double latencyTime() const override { return 0; }
 
-    GainNode(AudioContextBase&, float sampleRate);
+    GainNode(BaseAudioContext&, float sampleRate);
 
     float m_lastGain; // for de-zippering
     RefPtr<AudioParam> m_gain;
index c675d7d..b173259 100644
@@ -43,12 +43,12 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(MediaElementAudioSourceNode);
 
-Ref<MediaElementAudioSourceNode> MediaElementAudioSourceNode::create(AudioContextBase& context, HTMLMediaElement& mediaElement)
+Ref<MediaElementAudioSourceNode> MediaElementAudioSourceNode::create(BaseAudioContext& context, HTMLMediaElement& mediaElement)
 {
     return adoptRef(*new MediaElementAudioSourceNode(context, mediaElement));
 }
 
-MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContextBase& context, HTMLMediaElement& mediaElement)
+MediaElementAudioSourceNode::MediaElementAudioSourceNode(BaseAudioContext& context, HTMLMediaElement& mediaElement)
     : AudioNode(context, context.sampleRate())
     , m_mediaElement(mediaElement)
     , m_sourceNumberOfChannels(0)
@@ -97,7 +97,7 @@ void MediaElementAudioSourceNode::setFormat(size_t numberOfChannels, float sourc
 
         {
             // The context must be locked when changing the number of output channels.
-            AudioContextBase::AutoLocker contextLocker(context());
+            BaseAudioContext::AutoLocker contextLocker(context());
 
             // Do any necessary re-configuration to the output's number of channels.
             output(0)->setNumberOfChannels(numberOfChannels);
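
Only the locker's spelling changes here: BaseAudioContext::AutoLocker is the same RAII graph lock previously reached through the AudioContextBase name. A sketch of the pattern (hypothetical helper, same shape as setFormat() above):

    static void setOutputChannelCount(AudioNode& node, unsigned numberOfChannels)
    {
        // The context must be locked when changing the number of output channels.
        BaseAudioContext::AutoLocker contextLocker(node.context());
        node.output(0)->setNumberOfChannels(numberOfChannels);
    } // graph lock released when contextLocker goes out of scope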
index 71d0ed1..72e1a03 100644
@@ -40,7 +40,7 @@ class AudioContext;
 class MediaElementAudioSourceNode final : public AudioNode, public AudioSourceProviderClient {
     WTF_MAKE_ISO_ALLOCATED(MediaElementAudioSourceNode);
 public:
-    static Ref<MediaElementAudioSourceNode> create(AudioContextBase&, HTMLMediaElement&);
+    static Ref<MediaElementAudioSourceNode> create(BaseAudioContext&, HTMLMediaElement&);
 
     virtual ~MediaElementAudioSourceNode();
 
@@ -57,7 +57,7 @@ public:
     void unlock();
 
 private:
-    MediaElementAudioSourceNode(AudioContextBase&, HTMLMediaElement&);
+    MediaElementAudioSourceNode(BaseAudioContext&, HTMLMediaElement&);
 
     double tailTime() const override { return 0; }
     double latencyTime() const override { return 0; }
index 30f1a22..082e526 100644
@@ -39,12 +39,12 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(MediaStreamAudioDestinationNode);
 
-Ref<MediaStreamAudioDestinationNode> MediaStreamAudioDestinationNode::create(AudioContextBase& context, size_t numberOfChannels)
+Ref<MediaStreamAudioDestinationNode> MediaStreamAudioDestinationNode::create(BaseAudioContext& context, size_t numberOfChannels)
 {
     return adoptRef(*new MediaStreamAudioDestinationNode(context, numberOfChannels));
 }
 
-MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(AudioContextBase& context, size_t numberOfChannels)
+MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(BaseAudioContext& context, size_t numberOfChannels)
     : AudioBasicInspectorNode(context, context.sampleRate(), numberOfChannels)
     , m_source(MediaStreamAudioSource::create(context.sampleRate()))
     , m_stream(MediaStream::create(*context.document(), MediaStreamPrivate::create(context.document()->logger(), m_source.copyRef())))
index dbad928..7596f6f 100644
 
 namespace WebCore {
 
-class AudioContextBase;
+class BaseAudioContext;
 class MediaStreamAudioSource;
 
 class MediaStreamAudioDestinationNode final : public AudioBasicInspectorNode {
     WTF_MAKE_ISO_ALLOCATED(MediaStreamAudioDestinationNode);
 public:
-    static Ref<MediaStreamAudioDestinationNode> create(AudioContextBase&, size_t numberOfChannels);
+    static Ref<MediaStreamAudioDestinationNode> create(BaseAudioContext&, size_t numberOfChannels);
 
     virtual ~MediaStreamAudioDestinationNode();
 
@@ -49,7 +49,7 @@ public:
     void reset() final;
     
 private:
-    MediaStreamAudioDestinationNode(AudioContextBase&, size_t numberOfChannels);
+    MediaStreamAudioDestinationNode(BaseAudioContext&, size_t numberOfChannels);
 
     double tailTime() const final { return 0; }
     double latencyTime() const final { return 0; }
index 35e55a9..1025dd3 100644
@@ -38,12 +38,12 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(MediaStreamAudioSourceNode);
 
-Ref<MediaStreamAudioSourceNode> MediaStreamAudioSourceNode::create(AudioContextBase& context, MediaStream& mediaStream, MediaStreamTrack& audioTrack)
+Ref<MediaStreamAudioSourceNode> MediaStreamAudioSourceNode::create(BaseAudioContext& context, MediaStream& mediaStream, MediaStreamTrack& audioTrack)
 {
     return adoptRef(*new MediaStreamAudioSourceNode(context, mediaStream, audioTrack));
 }
 
-MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContextBase& context, MediaStream& mediaStream, MediaStreamTrack& audioTrack)
+MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(BaseAudioContext& context, MediaStream& mediaStream, MediaStreamTrack& audioTrack)
     : AudioNode(context, context.sampleRate())
     , m_mediaStream(mediaStream)
     , m_audioTrack(audioTrack)
index c72d312..47c8385 100644
@@ -40,7 +40,7 @@ class MultiChannelResampler;
 class MediaStreamAudioSourceNode final : public AudioNode, public AudioSourceProviderClient {
     WTF_MAKE_ISO_ALLOCATED(MediaStreamAudioSourceNode);
 public:
-    static Ref<MediaStreamAudioSourceNode> create(AudioContextBase&, MediaStream&, MediaStreamTrack&);
+    static Ref<MediaStreamAudioSourceNode> create(BaseAudioContext&, MediaStream&, MediaStreamTrack&);
 
     virtual ~MediaStreamAudioSourceNode();
 
@@ -54,7 +54,7 @@ public:
     void setFormat(size_t numberOfChannels, float sampleRate) override;
 
 private:
-    MediaStreamAudioSourceNode(AudioContextBase&, MediaStream&, MediaStreamTrack&);
+    MediaStreamAudioSourceNode(BaseAudioContext&, MediaStream&, MediaStreamTrack&);
 
     double tailTime() const override { return 0; }
     double latencyTime() const override { return 0; }
index fec508c..8ebb115 100644
@@ -41,7 +41,7 @@ WTF_MAKE_ISO_ALLOCATED_IMPL(OfflineAudioDestinationNode);
     
 const size_t renderQuantumSize = 128;    
 
-OfflineAudioDestinationNode::OfflineAudioDestinationNode(AudioContextBase& context, AudioBuffer* renderTarget)
+OfflineAudioDestinationNode::OfflineAudioDestinationNode(BaseAudioContext& context, AudioBuffer* renderTarget)
     : AudioDestinationNode(context, renderTarget->sampleRate())
     , m_renderTarget(renderTarget)
     , m_startedRendering(false)
index f8710fd..6acb8de 100644
@@ -37,7 +37,7 @@ class AudioContext;
 class OfflineAudioDestinationNode final : public AudioDestinationNode {
     WTF_MAKE_ISO_ALLOCATED(OfflineAudioDestinationNode);
 public:
-    static Ref<OfflineAudioDestinationNode> create(AudioContextBase& context, AudioBuffer* renderTarget)
+    static Ref<OfflineAudioDestinationNode> create(BaseAudioContext& context, AudioBuffer* renderTarget)
     {
         return adoptRef(*new OfflineAudioDestinationNode(context, renderTarget));
     }
@@ -55,7 +55,7 @@ public:
     float sampleRate() const override { return m_renderTarget->sampleRate(); }
 
 private:
-    OfflineAudioDestinationNode(AudioContextBase&, AudioBuffer* renderTarget);
+    OfflineAudioDestinationNode(BaseAudioContext&, AudioBuffer* renderTarget);
 
     // This AudioNode renders into this AudioBuffer.
     RefPtr<AudioBuffer> m_renderTarget;
index e8f31c7..8161bfb 100644
@@ -45,12 +45,12 @@ PeriodicWave* OscillatorNode::s_periodicWaveSquare = nullptr;
 PeriodicWave* OscillatorNode::s_periodicWaveSawtooth = nullptr;
 PeriodicWave* OscillatorNode::s_periodicWaveTriangle = nullptr;
 
-Ref<OscillatorNode> OscillatorNode::create(AudioContextBase& context, float sampleRate)
+Ref<OscillatorNode> OscillatorNode::create(BaseAudioContext& context, float sampleRate)
 {
     return adoptRef(*new OscillatorNode(context, sampleRate));
 }
 
-OscillatorNode::OscillatorNode(AudioContextBase& context, float sampleRate)
+OscillatorNode::OscillatorNode(BaseAudioContext& context, float sampleRate)
     : AudioScheduledSourceNode(context, sampleRate)
     , m_firstRender(true)
     , m_virtualReadIndex(0)
index 0895bce..4632766 100644
@@ -45,7 +45,7 @@ public:
         Custom
     };
 
-    static Ref<OscillatorNode> create(AudioContextBase&, float sampleRate);
+    static Ref<OscillatorNode> create(BaseAudioContext&, float sampleRate);
 
     virtual ~OscillatorNode();
 
@@ -60,7 +60,7 @@ public:
     void setPeriodicWave(PeriodicWave*);
 
 private:
-    OscillatorNode(AudioContextBase&, float sampleRate);
+    OscillatorNode(BaseAudioContext&, float sampleRate);
 
     void process(size_t framesToProcess) final;
     void reset() final;
index 99ebc5c..f81d689 100644
@@ -48,7 +48,7 @@ static void fixNANs(double &x)
         x = 0.0;
 }
 
-PannerNodeBase::PannerNodeBase(AudioContextBase& context, float sampleRate)
+PannerNodeBase::PannerNodeBase(BaseAudioContext& context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
 }
index 407ca41..f2514cb 100644
@@ -49,7 +49,7 @@ public:
     virtual float dopplerRate() = 0;
 
 protected:
-    PannerNodeBase(AudioContextBase&, float sampleRate);
+    PannerNodeBase(BaseAudioContext&, float sampleRate);
 };
 
 // PannerNode is an AudioNode with one input and one output.
@@ -69,9 +69,6 @@ public:
 
     virtual ~PannerNode();
 
-    BaseAudioContext& context() { return downcast<BaseAudioContext>(AudioNode::context()); }
-    const BaseAudioContext& context() const { return downcast<BaseAudioContext>(AudioNode::context()); }
-
     // AudioNode
     void process(size_t framesToProcess) override;
     void pullInputs(size_t framesToProcess) override;
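
The two deleted accessors existed only to downcast the context type; with AudioNode::context() itself now returning BaseAudioContext& (per the AudioNode.h changes in this patch), the shadowing overloads are dead weight and call sites compile unchanged:

    // Sketch, assuming a PannerNode& named panner: the inherited accessor
    // already has the right static type, so no downcast is needed.
    BaseAudioContext& context = panner.context(); // resolves to AudioNode::context()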
index 7610b15..22728b0 100644
@@ -44,12 +44,12 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(ScriptProcessorNode);
 
-Ref<ScriptProcessorNode> ScriptProcessorNode::create(AudioContextBase& context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
+Ref<ScriptProcessorNode> ScriptProcessorNode::create(BaseAudioContext& context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
 {
     return adoptRef(*new ScriptProcessorNode(context, sampleRate, bufferSize, numberOfInputChannels, numberOfOutputChannels));
 }
 
-ScriptProcessorNode::ScriptProcessorNode(AudioContextBase& context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
+ScriptProcessorNode::ScriptProcessorNode(BaseAudioContext& context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
     : AudioNode(context, sampleRate)
     , ActiveDOMObject(context.scriptExecutionContext())
     , m_doubleBufferIndex(0)
index 4c14a07..57bfc7d 100644
@@ -52,7 +52,7 @@ public:
     // This value controls how frequently the onaudioprocess event handler is called and how many sample-frames need to be processed each call.
     // Lower numbers for bufferSize will result in a lower (better) latency. Higher numbers will be necessary to avoid audio breakup and glitches.
     // The value chosen must carefully balance between latency and audio quality.
-    static Ref<ScriptProcessorNode> create(AudioContextBase&, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
+    static Ref<ScriptProcessorNode> create(BaseAudioContext&, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
 
     virtual ~ScriptProcessorNode();
 
@@ -71,7 +71,7 @@ private:
     double tailTime() const override;
     double latencyTime() const override;
 
-    ScriptProcessorNode(AudioContextBase&, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
+    ScriptProcessorNode(BaseAudioContext&, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
 
     void fireProcessEvent();
 
index ae4af9f..c441170 100644
@@ -35,7 +35,7 @@ namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(WaveShaperNode);
 
-WaveShaperNode::WaveShaperNode(AudioContextBase& context)
+WaveShaperNode::WaveShaperNode(BaseAudioContext& context)
     : AudioBasicProcessorNode(context, context.sampleRate())
 {
     setNodeType(NodeTypeWaveShaper);
index 5f49328..f5a28d8 100644
@@ -33,7 +33,7 @@ namespace WebCore {
 class WaveShaperNode final : public AudioBasicProcessorNode {
     WTF_MAKE_ISO_ALLOCATED(WaveShaperNode);
 public:
-    static Ref<WaveShaperNode> create(AudioContextBase& context)
+    static Ref<WaveShaperNode> create(BaseAudioContext& context)
     {
         return adoptRef(*new WaveShaperNode(context));
     }
@@ -49,7 +49,7 @@ public:
     double latency() const { return latencyTime(); }
 
 private:    
-    explicit WaveShaperNode(AudioContextBase&);
+    explicit WaveShaperNode(BaseAudioContext&);
 
     WaveShaperProcessor* waveShaperProcessor() { return static_cast<WaveShaperProcessor*>(processor()); }
 };
index f402081..0e3e65a 100644
 
 #include "WebKitAudioContext.h"
 
-#include "AnalyserNode.h"
-#include "AsyncAudioDecoder.h"
-#include "AudioBuffer.h"
-#include "AudioBufferCallback.h"
-#include "AudioBufferSourceNode.h"
-#include "AudioListener.h"
-#include "AudioNodeInput.h"
-#include "AudioNodeOutput.h"
-#include "AudioSession.h"
-#include "BiquadFilterNode.h"
-#include "ChannelMergerNode.h"
-#include "ChannelSplitterNode.h"
-#include "ConvolverNode.h"
-#include "DefaultAudioDestinationNode.h"
-#include "DelayNode.h"
-#include "Document.h"
-#include "DynamicsCompressorNode.h"
-#include "EventNames.h"
-#include "FFTFrame.h"
-#include "Frame.h"
-#include "FrameLoader.h"
-#include "GainNode.h"
-#include "GenericEventQueue.h"
-#include "HRTFDatabaseLoader.h"
-#include "HRTFPanner.h"
 #include "JSDOMPromiseDeferred.h"
-#include "Logging.h"
-#include "NetworkingContext.h"
-#include "OfflineAudioCompletionEvent.h"
-#include "OfflineAudioDestinationNode.h"
-#include "OscillatorNode.h"
-#include "Page.h"
-#include "PeriodicWave.h"
-#include "PlatformMediaSessionManager.h"
-#include "ScriptController.h"
-#include "ScriptProcessorNode.h"
-#include "WaveShaperNode.h"
 #include "WebKitAudioPannerNode.h"
-#include <JavaScriptCore/ScriptCallStack.h>
+#include <wtf/IsoMallocInlines.h>
 
 #if ENABLE(MEDIA_STREAM)
 #include "MediaStream.h"
 #include "MediaElementAudioSourceNode.h"
 #endif
 
-#if DEBUG_AUDIONODE_REFERENCES
-#include <stdio.h>
-#endif
-
-#if USE(GSTREAMER)
-#include "GStreamerCommon.h"
-#endif
-
-#if PLATFORM(IOS_FAMILY)
-#include "ScriptController.h"
-#include "Settings.h"
-#endif
-
-#include <JavaScriptCore/ArrayBuffer.h>
-#include <wtf/Atomics.h>
-#include <wtf/IsoMallocInlines.h>
-#include <wtf/MainThread.h>
-#include <wtf/Ref.h>
-#include <wtf/RefCounted.h>
-#include <wtf/Scope.h>
-#include <wtf/text/WTFString.h>
-
-const unsigned MaxPeriodicWaveLength = 4096;
-
 namespace WebCore {
 
 WTF_MAKE_ISO_ALLOCATED_IMPL(WebKitAudioContext);
 
 #define RELEASE_LOG_IF_ALLOWED(fmt, ...) RELEASE_LOG_IF(document() && document()->page() && document()->page()->isAlwaysOnLoggingAllowed(), Media, "%p - WebKitAudioContext::" fmt, this, ##__VA_ARGS__)
 
-bool WebKitAudioContext::isSampleRateRangeGood(float sampleRate)
-{
-    // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
-    // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
-    return sampleRate >= 44100 && sampleRate <= 96000;
-}
-
 #if OS(WINDOWS)
 // Don't allow more than this number of simultaneous AudioContexts talking to hardware.
 constexpr unsigned maxHardwareContexts = 4;
 #endif
-unsigned WebKitAudioContext::s_hardwareContextCount = 0;
 
 ExceptionOr<Ref<WebKitAudioContext>> WebKitAudioContext::create(Document& document)
 {
@@ -139,226 +71,14 @@ ExceptionOr<Ref<WebKitAudioContext>> WebKitAudioContext::create(Document& docume
 
 // Constructor for rendering to the audio hardware.
 WebKitAudioContext::WebKitAudioContext(Document& document)
-    : AudioContextBase(document)
-#if !RELEASE_LOG_DISABLED
-    , m_logger(document.logger())
-    , m_logIdentifier(uniqueLogIdentifier())
-#endif
-    , m_mediaSession(PlatformMediaSession::create(PlatformMediaSessionManager::sharedManager(), *this))
-    , m_eventQueue(MainThreadGenericEventQueue::create(*this))
+    : BaseAudioContext(document)
 {
-    // According to the spec, AudioContext must die only after page navigation.
-    // Let's mark it as an ActiveDOMObject with pending activity and unmark it in the clear method.
-    makePendingActivity();
-
-    constructCommon();
-
-    m_destinationNode = DefaultAudioDestinationNode::create(*this);
-
-    // Initialize the destination node's muted state to match the page's current muted state.
-    pageMutedStateDidChange();
-
-    document.addAudioProducer(*this);
-    document.registerForVisibilityStateChangedCallbacks(*this);
 }
 
 // Constructor for offline (non-realtime) rendering.
 WebKitAudioContext::WebKitAudioContext(Document& document, AudioBuffer* renderTarget)
-    : AudioContextBase(document)
-#if !RELEASE_LOG_DISABLED
-    , m_logger(document.logger())
-    , m_logIdentifier(uniqueLogIdentifier())
-#endif
-    , m_isOfflineContext(true)
-    , m_mediaSession(PlatformMediaSession::create(PlatformMediaSessionManager::sharedManager(), *this))
-    , m_eventQueue(MainThreadGenericEventQueue::create(*this))
-    , m_renderTarget(renderTarget)
-{
-    constructCommon();
-
-    // Create a new destination for offline rendering.
-    m_destinationNode = OfflineAudioDestinationNode::create(*this, m_renderTarget.get());
-}
-
-void WebKitAudioContext::constructCommon()
-{
-    FFTFrame::initialize();
-
-    m_listener = AudioListener::create();
-
-    ASSERT(document());
-    if (document()->audioPlaybackRequiresUserGesture())
-        addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
-    else
-        m_restrictions = NoRestrictions;
-
-#if PLATFORM(COCOA)
-    addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
-#endif
-}
-
-WebKitAudioContext::~WebKitAudioContext()
-{
-#if DEBUG_AUDIONODE_REFERENCES
-    fprintf(stderr, "%p: WebKitAudioContext::~WebKitAudioContext()\n", this);
-#endif
-    ASSERT(!m_isInitialized);
-    ASSERT(m_isStopScheduled);
-    ASSERT(m_nodesToDelete.isEmpty());
-    ASSERT(m_referencedNodes.isEmpty());
-    ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion fails on tests sometimes.
-    ASSERT(m_automaticPullNodes.isEmpty());
-    if (m_automaticPullNodesNeedUpdating)
-        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
-    ASSERT(m_renderingAutomaticPullNodes.isEmpty());
-    // FIXME: Can we assert that m_deferredFinishDerefList is empty?
-
-    if (!isOfflineContext() && scriptExecutionContext()) {
-        document()->removeAudioProducer(*this);
-        document()->unregisterForVisibilityStateChangedCallbacks(*this);
-    }
-}
-
-void WebKitAudioContext::lazyInitialize()
-{
-    ASSERT(!m_isStopScheduled);
-
-    if (m_isInitialized)
-        return;
-
-    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
-    ASSERT(!m_isAudioThreadFinished);
-    if (m_isAudioThreadFinished)
-        return;
-
-    if (m_destinationNode) {
-        m_destinationNode->initialize();
-
-        if (!isOfflineContext()) {
-            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
-            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
-            // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
-            // We may want to consider requiring it for symmetry with OfflineAudioContext.
-            startRendering();
-            ++s_hardwareContextCount;
-        }
-    }
-    m_isInitialized = true;
-}
-
-void WebKitAudioContext::clear()
-{
-    Ref<WebKitAudioContext> protectedThis(*this);
-
-    // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
-    if (m_destinationNode)
-        m_destinationNode = nullptr;
-
-    // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
-    do {
-        deleteMarkedNodes();
-        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
-        m_nodesMarkedForDeletion.clear();
-    } while (m_nodesToDelete.size());
-
-    clearPendingActivity();
-}
-
-void WebKitAudioContext::uninitialize()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-
-    if (!m_isInitialized)
-        return;
-
-    // This stops the audio thread and all audio rendering.
-    if (m_destinationNode)
-        m_destinationNode->uninitialize();
-
-    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
-    m_isAudioThreadFinished = true;
-
-    if (!isOfflineContext()) {
-        ASSERT(s_hardwareContextCount);
-        --s_hardwareContextCount;
-
-        // Offline contexts move to 'Closed' state when dispatching the completion event.
-        setState(State::Closed);
-    }
-
-    // Get rid of the sources which may still be playing.
-    derefUnfinishedSourceNodes();
-
-    m_isInitialized = false;
-}
-
-bool WebKitAudioContext::isInitialized() const
-{
-    return m_isInitialized;
-}
-
-void WebKitAudioContext::addReaction(State state, DOMPromiseDeferred<void>&& promise)
-{
-    size_t stateIndex = static_cast<size_t>(state);
-    if (stateIndex >= m_stateReactions.size())
-        m_stateReactions.grow(stateIndex + 1);
-
-    m_stateReactions[stateIndex].append(WTFMove(promise));
-}
-
-void WebKitAudioContext::setState(State state)
-{
-    if (m_state == state)
-        return;
-
-    m_state = state;
-    m_eventQueue->enqueueEvent(Event::create(eventNames().statechangeEvent, Event::CanBubble::Yes, Event::IsCancelable::No));
-
-    size_t stateIndex = static_cast<size_t>(state);
-    if (stateIndex >= m_stateReactions.size())
-        return;
-
-    Vector<DOMPromiseDeferred<void>> reactions;
-    m_stateReactions[stateIndex].swap(reactions);
-
-    for (auto& promise : reactions)
-        promise.resolve();
-}
-
-void WebKitAudioContext::stop()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-
-    // Usually ScriptExecutionContext calls stop twice.
-    if (m_isStopScheduled)
-        return;
-    m_isStopScheduled = true;
-
-    ASSERT(document());
-    document()->updateIsPlayingMedia();
-
-    uninitialize();
-    clear();
-}
-
-void WebKitAudioContext::suspend(ReasonForSuspension)
+    : BaseAudioContext(document, renderTarget)
 {
-    if (state() == State::Running) {
-        m_mediaSession->beginInterruption(PlatformMediaSession::PlaybackSuspended);
-        document()->updateIsPlayingMedia();
-    }
-}
-
-void WebKitAudioContext::resume()
-{
-    if (state() == State::Interrupted) {
-        m_mediaSession->endInterruption(PlatformMediaSession::MayResumePlaying);
-        document()->updateIsPlayingMedia();
-    }
 }
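
Both constructors now delegate wholesale to BaseAudioContext, which is assumed to take over everything the deleted bodies did (pending activity, constructCommon(), destination-node creation, muted-state sync). What remains of the class is thin; a paraphrased sketch inferred from this file, since the header diff is not shown here:

    // Hedged sketch, not the literal header:
    class WebKitAudioContext : public BaseAudioContext {
    public:
        static ExceptionOr<Ref<WebKitAudioContext>> create(Document&);
        ExceptionOr<Ref<WebKitAudioPannerNode>> createWebKitPanner();
        void close(DOMPromiseDeferred<void>&&);
        // Media-element/media-stream sources stay behind their feature guards.
    private:
        explicit WebKitAudioContext(Document&);
        WebKitAudioContext(Document&, AudioBuffer* renderTarget);
    };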
 
 const char* WebKitAudioContext::activeDOMObjectName() const
@@ -366,89 +86,6 @@ const char* WebKitAudioContext::activeDOMObjectName() const
     return "WebKitAudioContext";
 }
 
-DocumentIdentifier WebKitAudioContext::hostingDocumentIdentifier() const
-{
-    auto* document = downcast<Document>(m_scriptExecutionContext);
-    return document ? document->identifier() : DocumentIdentifier { };
-}
-
-bool WebKitAudioContext::isSuspended() const
-{
-    return !document() || document()->activeDOMObjectsAreSuspended() || document()->activeDOMObjectsAreStopped();
-}
-
-void WebKitAudioContext::visibilityStateChanged()
-{
-    // Do not suspend if audio is audible.
-    if (!document() || mediaState() == MediaProducer::IsPlayingAudio || m_isStopScheduled)
-        return;
-
-    if (document()->hidden()) {
-        if (state() == State::Running) {
-            RELEASE_LOG_IF_ALLOWED("visibilityStateChanged() Suspending playback after going to the background");
-            m_mediaSession->beginInterruption(PlatformMediaSession::EnteringBackground);
-        }
-    } else {
-        if (state() == State::Interrupted) {
-            RELEASE_LOG_IF_ALLOWED("visibilityStateChanged() Resuming playback after entering foreground");
-            m_mediaSession->endInterruption(PlatformMediaSession::MayResumePlaying);
-        }
-    }
-}
-
-bool WebKitAudioContext::wouldTaintOrigin(const URL& url) const
-{
-    if (url.protocolIsData())
-        return false;
-
-    if (auto* document = this->document())
-        return !document->securityOrigin().canRequest(url);
-
-    return false;
-}
-
-ExceptionOr<Ref<AudioBuffer>> WebKitAudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
-{
-    auto audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
-    if (!audioBuffer)
-        return Exception { NotSupportedError };
-    return audioBuffer.releaseNonNull();
-}
-
-ExceptionOr<Ref<AudioBuffer>> WebKitAudioContext::createBuffer(ArrayBuffer& arrayBuffer, bool mixToMono)
-{
-    auto audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer.data(), arrayBuffer.byteLength(), mixToMono, sampleRate());
-    if (!audioBuffer)
-        return Exception { SyntaxError };
-    return audioBuffer.releaseNonNull();
-}
-
-void WebKitAudioContext::decodeAudioData(Ref<ArrayBuffer>&& audioData, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback)
-{
-    if (!m_audioDecoder)
-        m_audioDecoder = makeUnique<AsyncAudioDecoder>();
-    m_audioDecoder->decodeAsync(WTFMove(audioData), sampleRate(), WTFMove(successCallback), WTFMove(errorCallback));
-}
-
-ExceptionOr<Ref<AudioBufferSourceNode>> WebKitAudioContext::createBufferSource()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    Ref<AudioBufferSourceNode> node = AudioBufferSourceNode::create(*this, sampleRate());
-
-    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
-    // When this happens, AudioScheduledSourceNode::finish() calls WebKitAudioContext::notifyNodeFinishedProcessing().
-    refNode(node);
-
-    return node;
-}
-
 #if ENABLE(VIDEO)
 
 ExceptionOr<Ref<MediaElementAudioSourceNode>> WebKitAudioContext::createMediaElementSource(HTMLMediaElement& mediaElement)
@@ -457,7 +94,7 @@ ExceptionOr<Ref<MediaElementAudioSourceNode>> WebKitAudioContext::createMediaEle
 
     ASSERT(isMainThread());
 
-    if (m_isStopScheduled || mediaElement.audioSourceNode())
+    if (isStopped() || mediaElement.audioSourceNode())
         return Exception { InvalidStateError };
 
     lazyInitialize();
@@ -480,7 +117,7 @@ ExceptionOr<Ref<MediaStreamAudioSourceNode>> WebKitAudioContext::createMediaStre
 
     ASSERT(isMainThread());
 
-    if (m_isStopScheduled)
+    if (isStopped())
         return Exception { InvalidStateError };
 
     auto audioTracks = mediaStream.getAudioTracks();
@@ -508,7 +145,7 @@ ExceptionOr<Ref<MediaStreamAudioSourceNode>> WebKitAudioContext::createMediaStre
 
 ExceptionOr<Ref<MediaStreamAudioDestinationNode>> WebKitAudioContext::createMediaStreamDestination()
 {
-    if (m_isStopScheduled)
+    if (isStopped())
         return Exception { InvalidStateError };
 
     // FIXME: Add support for an optional argument which specifies the number of channels.
@@ -518,885 +155,40 @@ ExceptionOr<Ref<MediaStreamAudioDestinationNode>> WebKitAudioContext::createMedi
 
 #endif
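
Each surviving factory swaps its direct read of m_isStopScheduled for the isStopped() accessor that BaseAudioContext now exposes, so the flag itself can stay private to the base class. Presumably the accessor is a one-liner; the guard pattern in this file becomes:

    // Assumed shape of the accessor (BaseAudioContext.h declares isStopped() const):
    bool isStopped() const { return m_isStopScheduled; }

    // Typical guard in the factories kept in this file:
    if (isStopped())
        return Exception { InvalidStateError };
    lazyInitialize();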
 
-ExceptionOr<Ref<ScriptProcessorNode>> WebKitAudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels)
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-
-    // W3C Editor's Draft 06 June 2017
-    //  https://webaudio.github.io/web-audio-api/#widl-BaseAudioContext-createScriptProcessor-ScriptProcessorNode-unsigned-long-bufferSize-unsigned-long-numberOfInputChannels-unsigned-long-numberOfOutputChannels
-
-    // The bufferSize parameter determines the buffer size in units of sample-frames. If it's not passed in,
-    // or if the value is 0, then the implementation will choose the best buffer size for the given environment,
-    // which will be constant power of 2 throughout the lifetime of the node. ... If the value of this parameter
-    // is not one of the allowed power-of-2 values listed above, an IndexSizeError must be thrown.
-    switch (bufferSize) {
-    case 0:
-#if USE(AUDIO_SESSION)
-        // Pick a value between 256 (2^8) and 16384 (2^14), based on the buffer size of the current AudioSession:
-        bufferSize = 1 << std::max<size_t>(8, std::min<size_t>(14, std::log2(AudioSession::sharedSession().bufferSize())));
-#else
-        bufferSize = 2048;
-#endif
-        break;
-    case 256:
-    case 512:
-    case 1024:
-    case 2048:
-    case 4096:
-    case 8192:
-    case 16384:
-        break;
-    default:
-        return Exception { IndexSizeError };
-    }
-
-    // An IndexSizeError exception must be thrown if bufferSize or numberOfInputChannels or numberOfOutputChannels
-    // are outside the valid range. It is invalid for both numberOfInputChannels and numberOfOutputChannels to be zero.
-    // In this case an IndexSizeError must be thrown.
-
-    if (!numberOfInputChannels && !numberOfOutputChannels)
-        return Exception { NotSupportedError };
-
-    // This parameter [numberOfInputChannels] determines the number of channels for this node's input. Values of
-    // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.
-
-    if (numberOfInputChannels > maxNumberOfChannels())
-        return Exception { NotSupportedError };
-
-    // This parameter [numberOfOutputChannels] determines the number of channels for this node's output. Values of
-    // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.
-
-    if (numberOfOutputChannels > maxNumberOfChannels())
-        return Exception { NotSupportedError };
-
-    auto node = ScriptProcessorNode::create(*this, sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
-
-    refNode(node); // context keeps reference until we stop making javascript rendering callbacks
-    return node;
-}
-
-ExceptionOr<Ref<BiquadFilterNode>> WebKitAudioContext::createBiquadFilter()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-
-    return BiquadFilterNode::create(*this, sampleRate());
-}
-
-ExceptionOr<Ref<WaveShaperNode>> WebKitAudioContext::createWaveShaper()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    return WaveShaperNode::create(*this);
-}
-
-ExceptionOr<Ref<WebKitAudioPannerNode>> WebKitAudioContext::createPanner()
+ExceptionOr<Ref<WebKitAudioPannerNode>> WebKitAudioContext::createWebKitPanner()
 {
     ALWAYS_LOG(LOGIDENTIFIER);
 
     ASSERT(isMainThread());
-    if (m_isStopScheduled)
+    if (isStopped())
         return Exception { InvalidStateError };
 
     lazyInitialize();
     return WebKitAudioPannerNode::create(*this, sampleRate());
 }
 
-ExceptionOr<Ref<ConvolverNode>> WebKitAudioContext::createConvolver()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    return ConvolverNode::create(*this, sampleRate());
-}
-
-ExceptionOr<Ref<DynamicsCompressorNode>> WebKitAudioContext::createDynamicsCompressor()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    return DynamicsCompressorNode::create(*this, sampleRate());
-}
-
-ExceptionOr<Ref<AnalyserNode>> WebKitAudioContext::createAnalyser()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    return AnalyserNode::create(*this, sampleRate());
-}
-
-ExceptionOr<Ref<GainNode>> WebKitAudioContext::createGain()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    return GainNode::create(*this, sampleRate());
-}
-
-ExceptionOr<Ref<DelayNode>> WebKitAudioContext::createDelay(double maxDelayTime)
+void WebKitAudioContext::close(DOMPromiseDeferred<void>&& promise)
 {
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    return DelayNode::create(*this, sampleRate(), maxDelayTime);
-}
+    if (isOfflineContext() || isStopped()) {
+        promise.reject(InvalidStateError);
+        return;
+    }
 
-ExceptionOr<Ref<ChannelSplitterNode>> WebKitAudioContext::createChannelSplitter(size_t numberOfOutputs)
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
+    if (state() == State::Closed || !destinationNode()) {
+        promise.resolve();
+        return;
+    }
 
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
+    addReaction(State::Closed, WTFMove(promise));
 
     lazyInitialize();
-    auto node = ChannelSplitterNode::create(*this, sampleRate(), numberOfOutputs);
-    if (!node)
-        return Exception { IndexSizeError };
-    return node.releaseNonNull();
-}
 
-ExceptionOr<Ref<ChannelMergerNode>> WebKitAudioContext::createChannelMerger(size_t numberOfInputs)
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    auto node = ChannelMergerNode::create(*this, sampleRate(), numberOfInputs);
-    if (!node)
-        return Exception { IndexSizeError };
-    return node.releaseNonNull();
+    destinationNode()->close([this, protectedThis = makeRef(*this)] {
+        setState(State::Closed);
+        uninitialize();
+    });
 }
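
close() resolves its promise indirectly: addReaction() parks it under State::Closed, and the later setState(State::Closed) call resolves everything parked for that state. The mechanism, copied from the addReaction()/setState() implementations deleted earlier in this file's diff (both are assumed to live in BaseAudioContext now):

    void addReaction(State state, DOMPromiseDeferred<void>&& promise)
    {
        size_t stateIndex = static_cast<size_t>(state);
        if (stateIndex >= m_stateReactions.size())
            m_stateReactions.grow(stateIndex + 1);
        m_stateReactions[stateIndex].append(WTFMove(promise));
    }

    void setState(State state)
    {
        if (m_state == state)
            return;
        m_state = state; // also fires the "statechange" event
        size_t stateIndex = static_cast<size_t>(state);
        if (stateIndex >= m_stateReactions.size())
            return;
        Vector<DOMPromiseDeferred<void>> reactions;
        m_stateReactions[stateIndex].swap(reactions);
        for (auto& promise : reactions)
            promise.resolve();
    }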
 
-ExceptionOr<Ref<OscillatorNode>> WebKitAudioContext::createOscillator()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-
-    Ref<OscillatorNode> node = OscillatorNode::create(*this, sampleRate());
-
-    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
-    // When this happens, AudioScheduledSourceNode::finish() calls WebKitAudioContext::notifyNodeFinishedProcessing().
-    refNode(node);
-
-    return node;
-}
-
-ExceptionOr<Ref<PeriodicWave>> WebKitAudioContext::createPeriodicWave(Float32Array& real, Float32Array& imaginary)
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    if (real.length() != imaginary.length() || (real.length() > MaxPeriodicWaveLength) || !real.length())
-        return Exception { IndexSizeError };
-    lazyInitialize();
-    return PeriodicWave::create(sampleRate(), real, imaginary);
-}
-
-void WebKitAudioContext::notifyNodeFinishedProcessing(AudioNode* node)
-{
-    ASSERT(isAudioThread());
-    m_finishedNodes.append(node);
-}
-
-void WebKitAudioContext::derefFinishedSourceNodes()
-{
-    ASSERT(isGraphOwner());
-    ASSERT(isAudioThread() || isAudioThreadFinished());
-    for (auto& node : m_finishedNodes)
-        derefNode(*node);
-
-    m_finishedNodes.clear();
-}
-
-void WebKitAudioContext::refNode(AudioNode& node)
-{
-    ASSERT(isMainThread());
-    AutoLocker locker(*this);
-
-    node.ref(AudioNode::RefTypeConnection);
-    m_referencedNodes.append(&node);
-}
-
-void WebKitAudioContext::derefNode(AudioNode& node)
-{
-    ASSERT(isGraphOwner());
-
-    node.deref(AudioNode::RefTypeConnection);
-
-    ASSERT(m_referencedNodes.contains(&node));
-    m_referencedNodes.removeFirst(&node);
-}
-
-void WebKitAudioContext::derefUnfinishedSourceNodes()
-{
-    ASSERT(isMainThread() && isAudioThreadFinished());
-    for (auto& node : m_referencedNodes)
-        node->deref(AudioNode::RefTypeConnection);
-
-    m_referencedNodes.clear();
-}
-
-void WebKitAudioContext::lock(bool& mustReleaseLock)
-{
-    // Don't allow regular lock in real-time audio thread.
-    ASSERT(isMainThread());
-
-    Thread& thisThread = Thread::current();
-
-    if (&thisThread == m_graphOwnerThread) {
-        // We already have the lock.
-        mustReleaseLock = false;
-    } else {
-        // Acquire the lock.
-        m_contextGraphMutex.lock();
-        m_graphOwnerThread = &thisThread;
-        mustReleaseLock = true;
-    }
-}
-
-bool WebKitAudioContext::tryLock(bool& mustReleaseLock)
-{
-    Thread& thisThread = Thread::current();
-    bool isAudioThread = &thisThread == audioThread();
-
-    // Try to catch cases of using try lock on main thread - it should use regular lock.
-    ASSERT(isAudioThread || isAudioThreadFinished());
-
-    if (!isAudioThread) {
-        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
-        lock(mustReleaseLock);
-        return true;
-    }
-
-    bool hasLock;
-
-    if (&thisThread == m_graphOwnerThread) {
-        // Thread already has the lock.
-        hasLock = true;
-        mustReleaseLock = false;
-    } else {
-        // Don't already have the lock - try to acquire it.
-        hasLock = m_contextGraphMutex.tryLock();
-
-        if (hasLock)
-            m_graphOwnerThread = &thisThread;
-
-        mustReleaseLock = hasLock;
-    }
-
-    return hasLock;
-}
-
-void WebKitAudioContext::unlock()
-{
-    ASSERT(m_graphOwnerThread == &Thread::current());
-
-    m_graphOwnerThread = nullptr;
-    m_contextGraphMutex.unlock();
-}
-
-bool WebKitAudioContext::isAudioThread() const
-{
-    return m_audioThread == &Thread::current();
-}
-
-bool WebKitAudioContext::isGraphOwner() const
-{
-    return m_graphOwnerThread == &Thread::current();
-}
-
-void WebKitAudioContext::addDeferredFinishDeref(AudioNode* node)
-{
-    ASSERT(isAudioThread());
-    m_deferredFinishDerefList.append(node);
-}
-
-void WebKitAudioContext::handlePreRenderTasks()
-{
-    ASSERT(isAudioThread());
-
-    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
-    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
-    bool mustReleaseLock;
-    if (tryLock(mustReleaseLock)) {
-        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
-        handleDirtyAudioSummingJunctions();
-        handleDirtyAudioNodeOutputs();
-
-        updateAutomaticPullNodes();
-
-        if (mustReleaseLock)
-            unlock();
-    }
-}
-
-void WebKitAudioContext::handlePostRenderTasks()
-{
-    ASSERT(isAudioThread());
-
-    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
-    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
-    // from the render graph (in which case they'll render silence).
-    bool mustReleaseLock;
-    if (tryLock(mustReleaseLock)) {
-        // Take care of finishing any derefs where the tryLock() failed previously.
-        handleDeferredFinishDerefs();
-
-        // Dynamically clean up nodes which are no longer needed.
-        derefFinishedSourceNodes();
-
-        // Don't delete in the real-time thread. Let the main thread do it.
-        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
-        scheduleNodeDeletion();
-
-        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
-        handleDirtyAudioSummingJunctions();
-        handleDirtyAudioNodeOutputs();
-
-        updateAutomaticPullNodes();
-
-        if (mustReleaseLock)
-            unlock();
-    }
-}
-
-void WebKitAudioContext::handleDeferredFinishDerefs()
-{
-    ASSERT(isAudioThread() && isGraphOwner());
-    for (auto& node : m_deferredFinishDerefList)
-        node->finishDeref(AudioNode::RefTypeConnection);
-
-    m_deferredFinishDerefList.clear();
-}
-
-void WebKitAudioContext::markForDeletion(AudioNode& node)
-{
-    ASSERT(isGraphOwner());
-
-    if (isAudioThreadFinished())
-        m_nodesToDelete.append(&node);
-    else
-        m_nodesMarkedForDeletion.append(&node);
-
-    // This is probably the best time for us to remove the node from automatic pull list,
-    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
-    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
-    // modify m_renderingAutomaticPullNodes.
-    removeAutomaticPullNode(node);
-}
-
-void WebKitAudioContext::scheduleNodeDeletion()
-{
-    bool isGood = m_isInitialized && isGraphOwner();
-    ASSERT(isGood);
-    if (!isGood)
-        return;
-
-    // Make sure to call deleteMarkedNodes() on main thread.
-    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
-        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
-        m_nodesMarkedForDeletion.clear();
-
-        m_isDeletionScheduled = true;
-
-        callOnMainThread([protectedThis = makeRef(*this)]() mutable {
-            protectedThis->deleteMarkedNodes();
-        });
-    }
-}
-
-void WebKitAudioContext::deleteMarkedNodes()
-{
-    ASSERT(isMainThread());
-
-    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
-    Ref<WebKitAudioContext> protectedThis(*this);
-    {
-        AutoLocker locker(*this);
-
-        while (m_nodesToDelete.size()) {
-            AudioNode* node = m_nodesToDelete.takeLast();
-
-            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
-            unsigned numberOfInputs = node->numberOfInputs();
-            for (unsigned i = 0; i < numberOfInputs; ++i)
-                m_dirtySummingJunctions.remove(node->input(i));
-
-            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
-            unsigned numberOfOutputs = node->numberOfOutputs();
-            for (unsigned i = 0; i < numberOfOutputs; ++i)
-                m_dirtyAudioNodeOutputs.remove(node->output(i));
-
-            // Finally, delete it.
-            delete node;
-        }
-        m_isDeletionScheduled = false;
-    }
-}
-
-void WebKitAudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
-{
-    ASSERT(isGraphOwner());
-    m_dirtySummingJunctions.add(summingJunction);
-}
-
-void WebKitAudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
-{
-    ASSERT(isMainThread());
-    AutoLocker locker(*this);
-    m_dirtySummingJunctions.remove(summingJunction);
-}
-
-EventTargetInterface WebKitAudioContext::eventTargetInterface() const
-{
-    return WebKitAudioContextEventTargetInterfaceType;
-}
-
-void WebKitAudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
-{
-    ASSERT(isGraphOwner());
-    m_dirtyAudioNodeOutputs.add(output);
-}
-
-void WebKitAudioContext::handleDirtyAudioSummingJunctions()
-{
-    ASSERT(isGraphOwner());
-
-    for (auto& junction : m_dirtySummingJunctions)
-        junction->updateRenderingState();
-
-    m_dirtySummingJunctions.clear();
-}
-
-void WebKitAudioContext::handleDirtyAudioNodeOutputs()
-{
-    ASSERT(isGraphOwner());
-
-    for (auto& output : m_dirtyAudioNodeOutputs)
-        output->updateRenderingState();
-
-    m_dirtyAudioNodeOutputs.clear();
-}
-
-void WebKitAudioContext::addAutomaticPullNode(AudioNode& node)
-{
-    ASSERT(isGraphOwner());
-
-    if (m_automaticPullNodes.add(&node).isNewEntry)
-        m_automaticPullNodesNeedUpdating = true;
-}
-
-void WebKitAudioContext::removeAutomaticPullNode(AudioNode& node)
-{
-    ASSERT(isGraphOwner());
-
-    if (m_automaticPullNodes.remove(&node))
-        m_automaticPullNodesNeedUpdating = true;
-}
-
-void WebKitAudioContext::updateAutomaticPullNodes()
-{
-    ASSERT(isGraphOwner());
-
-    if (m_automaticPullNodesNeedUpdating) {
-        // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
-        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
-
-        unsigned i = 0;
-        for (auto& output : m_automaticPullNodes)
-            m_renderingAutomaticPullNodes[i++] = output;
-
-        m_automaticPullNodesNeedUpdating = false;
-    }
-}
-
-void WebKitAudioContext::processAutomaticPullNodes(size_t framesToProcess)
-{
-    ASSERT(isAudioThread());
-
-    for (auto& node : m_renderingAutomaticPullNodes)
-        node->processIfNecessary(framesToProcess);
-}
-
-void WebKitAudioContext::nodeWillBeginPlayback()
-{
-    // Called by scheduled AudioNodes when clients schedule their start times.
-    // Prior to the introduction of suspend(), resume(), and stop(), starting
-    // a scheduled AudioNode would remove the user-gesture restriction, if present,
-    // and would thus unmute the context. Now that AudioContext stays in the
-    // "suspended" state if a user-gesture restriction is present, starting a
-    // scheduled AudioNode should set the state to "running", but only if the
-    // user-gesture restriction is set.
-    if (userGestureRequiredForAudioStart())
-        startRendering();
-}
-
-static bool shouldDocumentAllowWebAudioToAutoPlay(const Document& document)
-{
-    if (document.processingUserGestureForMedia() || document.isCapturing())
-        return true;
-    return document.quirks().shouldAutoplayWebAudioForArbitraryUserGesture() && document.topDocument().hasHadUserInteraction();
-}
-
-bool WebKitAudioContext::willBeginPlayback()
-{
-    auto* document = this->document();
-    if (!document)
-        return false;
-
-    if (userGestureRequiredForAudioStart()) {
-        if (!shouldDocumentAllowWebAudioToAutoPlay(*document)) {
-            ALWAYS_LOG(LOGIDENTIFIER, "returning false, not processing user gesture or capturing");
-            return false;
-        }
-        removeBehaviorRestriction(WebKitAudioContext::RequireUserGestureForAudioStartRestriction);
-    }
-
-    if (pageConsentRequiredForAudioStart()) {
-        auto* page = document->page();
-        if (page && !page->canStartMedia()) {
-            document->addMediaCanStartListener(*this);
-            ALWAYS_LOG(LOGIDENTIFIER, "returning false, page doesn't allow media to start");
-            return false;
-        }
-        removeBehaviorRestriction(WebKitAudioContext::RequirePageConsentForAudioStartRestriction);
-    }
-
-    auto willBegin = m_mediaSession->clientWillBeginPlayback();
-    ALWAYS_LOG(LOGIDENTIFIER, "returning ", willBegin);
-
-    return willBegin;
-}
-
-bool WebKitAudioContext::willPausePlayback()
-{
-    auto* document = this->document();
-    if (!document)
-        return false;
-
-    if (userGestureRequiredForAudioStart()) {
-        if (!document->processingUserGestureForMedia())
-            return false;
-        removeBehaviorRestriction(WebKitAudioContext::RequireUserGestureForAudioStartRestriction);
-    }
-
-    if (pageConsentRequiredForAudioStart()) {
-        auto* page = document->page();
-        if (page && !page->canStartMedia()) {
-            document->addMediaCanStartListener(*this);
-            return false;
-        }
-        removeBehaviorRestriction(WebKitAudioContext::RequirePageConsentForAudioStartRestriction);
-    }
-
-    return m_mediaSession->clientWillPausePlayback();
-}
-
-void WebKitAudioContext::startRendering()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    if (m_isStopScheduled || !willBeginPlayback())
-        return;
-
-    makePendingActivity();
-
-    destination()->startRendering();
-    setState(State::Running);
-}
-
-void WebKitAudioContext::mediaCanStart(Document& document)
-{
-    ASSERT_UNUSED(document, &document == this->document());
-    removeBehaviorRestriction(WebKitAudioContext::RequirePageConsentForAudioStartRestriction);
-    mayResumePlayback(true);
-}
-
-MediaProducer::MediaStateFlags WebKitAudioContext::mediaState() const
-{
-    if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
-        return MediaProducer::IsPlayingAudio;
-
-    return MediaProducer::IsNotPlaying;
-}
-
-void WebKitAudioContext::pageMutedStateDidChange()
-{
-    if (m_destinationNode && document() && document()->page())
-        m_destinationNode->setMuted(document()->page()->isAudioMuted());
-}
-
-void WebKitAudioContext::isPlayingAudioDidChange()
-{
-    // Make sure to call Document::updateIsPlayingMedia() on the main thread, since
-    // we could be on the audio I/O thread here and the call into WebCore could block.
-    callOnMainThread([protectedThis = makeRef(*this)] {
-        if (protectedThis->document())
-            protectedThis->document()->updateIsPlayingMedia();
-    });
-}
-
-void WebKitAudioContext::finishedRendering(bool didRendering)
-{
-    ASSERT(isOfflineContext());
-    ASSERT(isMainThread());
-    if (!isMainThread())
-        return;
-
-    auto clearPendingActivityIfExitEarly = WTF::makeScopeExit([this] {
-        clearPendingActivity();
-    });
-
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    if (!didRendering)
-        return;
-
-    AudioBuffer* renderedBuffer = m_renderTarget.get();
-    setState(State::Closed);
-
-    ASSERT(renderedBuffer);
-    if (!renderedBuffer)
-        return;
-
-    // Avoid firing the event if the document has already gone away.
-    if (m_isStopScheduled)
-        return;
-
-    clearPendingActivityIfExitEarly.release();
-    m_eventQueue->enqueueEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
-}
-
-void WebKitAudioContext::dispatchEvent(Event& event)
-{
-    EventTarget::dispatchEvent(event);
-    if (event.eventInterface() == OfflineAudioCompletionEventInterfaceType)
-        clearPendingActivity();
-}
-
-void WebKitAudioContext::incrementActiveSourceCount()
-{
-    ++m_activeSourceCount;
-}
-
-void WebKitAudioContext::decrementActiveSourceCount()
-{
-    --m_activeSourceCount;
-}
-
-void WebKitAudioContext::suspendRendering(DOMPromiseDeferred<void>&& promise)
-{
-    if (isOfflineContext() || m_isStopScheduled) {
-        promise.reject(InvalidStateError);
-        return;
-    }
-
-    if (m_state == State::Suspended) {
-        promise.resolve();
-        return;
-    }
-
-    if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
-        promise.reject();
-        return;
-    }
-
-    addReaction(State::Suspended, WTFMove(promise));
-
-    if (!willPausePlayback())
-        return;
-
-    lazyInitialize();
-
-    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
-        setState(State::Suspended);
-    });
-}
-
-void WebKitAudioContext::resumeRendering(DOMPromiseDeferred<void>&& promise)
-{
-    if (isOfflineContext() || m_isStopScheduled) {
-        promise.reject(InvalidStateError);
-        return;
-    }
-
-    if (m_state == State::Running) {
-        promise.resolve();
-        return;
-    }
-
-    if (m_state == State::Closed || !m_destinationNode) {
-        promise.reject();
-        return;
-    }
-
-    addReaction(State::Running, WTFMove(promise));
-
-    if (!willBeginPlayback())
-        return;
-
-    lazyInitialize();
-
-    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
-        setState(State::Running);
-    });
-}
-
-void WebKitAudioContext::close(DOMPromiseDeferred<void>&& promise)
-{
-    if (isOfflineContext() || m_isStopScheduled) {
-        promise.reject(InvalidStateError);
-        return;
-    }
-
-    if (m_state == State::Closed || !m_destinationNode) {
-        promise.resolve();
-        return;
-    }
-
-    addReaction(State::Closed, WTFMove(promise));
-
-    lazyInitialize();
-
-    m_destinationNode->close([this, protectedThis = makeRef(*this)] {
-        setState(State::Closed);
-        uninitialize();
-    });
-}
-
-void WebKitAudioContext::suspendPlayback()
-{
-    if (!m_destinationNode || m_state == State::Closed)
-        return;
-
-    if (m_state == State::Suspended) {
-        if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
-            setState(State::Interrupted);
-        return;
-    }
-
-    lazyInitialize();
-
-    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
-        bool interrupted = m_mediaSession->state() == PlatformMediaSession::Interrupted;
-        setState(interrupted ? State::Interrupted : State::Suspended);
-    });
-}
-
-void WebKitAudioContext::mayResumePlayback(bool shouldResume)
-{
-    if (!m_destinationNode || m_state == State::Closed || m_state == State::Running)
-        return;
-
-    if (!shouldResume) {
-        setState(State::Suspended);
-        return;
-    }
-
-    if (!willBeginPlayback())
-        return;
-
-    lazyInitialize();
-
-    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
-        setState(State::Running);
-    });
-}
-
-void WebKitAudioContext::postTask(WTF::Function<void()>&& task)
-{
-    if (m_isStopScheduled)
-        return;
-
-    m_scriptExecutionContext->postTask(WTFMove(task));
-}
-
-const SecurityOrigin* WebKitAudioContext::origin() const
-{
-    return m_scriptExecutionContext ? m_scriptExecutionContext->securityOrigin() : nullptr;
-}
-
-void WebKitAudioContext::addConsoleMessage(MessageSource source, MessageLevel level, const String& message)
-{
-    if (m_scriptExecutionContext)
-        m_scriptExecutionContext->addConsoleMessage(source, level, message);
-}
-
-void WebKitAudioContext::clearPendingActivity()
-{
-    if (!m_pendingActivity)
-        return;
-    m_pendingActivity = nullptr;
-    // FIXME: Remove this specific deref() and ref() call in makePendingActivity().
-    deref();
-}
-
-void WebKitAudioContext::makePendingActivity()
-{
-    if (m_pendingActivity)
-        return;
-    m_pendingActivity = ActiveDOMObject::makePendingActivity(*this);
-    ref();
-}
-
-#if !RELEASE_LOG_DISABLED
-WTFLogChannel& WebKitAudioContext::logChannel() const
-{
-    return LogMedia;
-}
-#endif
-
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
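The suspendRendering()/resumeRendering()/close() implementations removed above all share one shape: settle the promise immediately if the context is already in the target state, otherwise queue a reaction for that state via addReaction() and let the destination node's asynchronous completion handler call setState(), which settles everything queued for the state just entered. A minimal standalone sketch of that pattern, with std::function standing in for DOMPromiseDeferred and a plain map for m_stateReactions (all names here are illustrative, not WebKit's):

    #include <functional>
    #include <iostream>
    #include <map>
    #include <vector>

    enum class State { Suspended, Running, Closed };

    class ContextSketch {
    public:
        // Stand-in for DOMPromiseDeferred<void>: called with true to
        // "resolve" and false to "reject".
        using Reaction = std::function<void(bool)>;

        void suspend(Reaction reaction)
        {
            if (m_state == State::Suspended) {
                reaction(true); // Already in the target state: resolve now.
                return;
            }
            if (m_state == State::Closed) {
                reaction(false); // Closed contexts reject the operation.
                return;
            }
            // Queue the reaction; the real code then asks the destination
            // node to suspend and transitions in its completion handler.
            m_reactions[State::Suspended].push_back(std::move(reaction));
            setState(State::Suspended);
        }

        void setState(State newState)
        {
            m_state = newState;
            // Settle every reaction queued for the state we just entered.
            auto pending = std::move(m_reactions[newState]);
            m_reactions[newState].clear();
            for (auto& reaction : pending)
                reaction(true);
        }

    private:
        State m_state { State::Running };
        std::map<State, std::vector<Reaction>> m_reactions;
    };

    int main()
    {
        ContextSketch context;
        context.suspend([](bool resolved) {
            std::cout << (resolved ? "suspended\n" : "rejected\n");
        });
    }

makePendingActivity()/clearPendingActivity() play a complementary role on the rendering path: the context pairs a PendingActivity with a manual ref() so it stays alive while audio is in flight, and the FIXME above notes that the extra ref()/deref() pair is meant to go away eventually.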
index e9abb79..d3d5696 100644
 
 #pragma once
 
-#include "ActiveDOMObject.h"
-#include "AsyncAudioDecoder.h"
-#include "AudioBus.h"
-#include "AudioContext.h"
-#include "AudioContextState.h"
-#include "AudioDestinationNode.h"
-#include "EventTarget.h"
-#include "MediaCanStartListener.h"
-#include "MediaProducer.h"
-#include "PlatformMediaSession.h"
-#include "ScriptExecutionContext.h"
-#include "VisibilityChangeClient.h"
-#include <JavaScriptCore/ConsoleTypes.h>
-#include <JavaScriptCore/Float32Array.h>
-#include <atomic>
-#include <wtf/HashSet.h>
-#include <wtf/LoggerHelper.h>
-#include <wtf/MainThread.h>
-#include <wtf/RefPtr.h>
-#include <wtf/ThreadSafeRefCounted.h>
-#include <wtf/Threading.h>
-#include <wtf/UniqueRef.h>
-#include <wtf/Vector.h>
-#include <wtf/text/AtomStringHash.h>
+#include "BaseAudioContext.h"
 
 namespace WebCore {
 
-class AnalyserNode;
 class AudioBuffer;
-class AudioBufferCallback;
-class AudioBufferSourceNode;
-class AudioListener;
-class AudioSummingJunction;
-class BiquadFilterNode;
-class ChannelMergerNode;
-class ChannelSplitterNode;
-class ConvolverNode;
-class DelayNode;
 class Document;
-class DynamicsCompressorNode;
-class GainNode;
 class HTMLMediaElement;
-class MainThreadGenericEventQueue;
 class MediaElementAudioSourceNode;
 class MediaStream;
 class MediaStreamAudioDestinationNode;
 class MediaStreamAudioSourceNode;
-class OscillatorNode;
-class PeriodicWave;
-class ScriptProcessorNode;
-class SecurityOrigin;
-class WaveShaperNode;
 class WebKitAudioPannerNode;
 
-template<typename IDLType> class DOMPromiseDeferred;
-
 // AudioContext is the cornerstone of the web audio API and all AudioNodes are created from it.
 // For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism.
 
 class WebKitAudioContext
-    : public AudioContextBase
-    , private PlatformMediaSessionClient
-    , private VisibilityChangeClient
+    : public BaseAudioContext
 {
     WTF_MAKE_ISO_ALLOCATED(WebKitAudioContext);
 public:
     // Create a WebKitAudioContext for rendering to the audio hardware.
     static ExceptionOr<Ref<WebKitAudioContext>> create(Document&);
 
-    virtual ~WebKitAudioContext();
-
-    bool isInitialized() const final;
-
-    bool isOfflineContext() const final { return m_isOfflineContext; }
-
-    DocumentIdentifier hostingDocumentIdentifier() const final;
-
-    AudioDestinationNode* destination() { return m_destinationNode.get(); }
-    size_t currentSampleFrame() const final { return m_destinationNode ? m_destinationNode->currentSampleFrame() : 0; }
-    double currentTime() const final { return m_destinationNode ? m_destinationNode->currentTime() : 0.; }
-    float sampleRate() const final { return m_destinationNode ? m_destinationNode->sampleRate() : 0.f; }
-    unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }
-
-    void incrementActiveSourceCount() final;
-    void decrementActiveSourceCount() final;
-
-    ExceptionOr<Ref<AudioBuffer>> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
-    ExceptionOr<Ref<AudioBuffer>> createBuffer(ArrayBuffer&, bool mixToMono);
-
-    // Asynchronous audio file data decoding.
-    void decodeAudioData(Ref<ArrayBuffer>&&, RefPtr<AudioBufferCallback>&&, RefPtr<AudioBufferCallback>&&);
-
-    AudioListener* listener() { return m_listener.get(); }
-
-    void suspendRendering(DOMPromiseDeferred<void>&&);
-    void resumeRendering(DOMPromiseDeferred<void>&&);
     void close(DOMPromiseDeferred<void>&&);
 
-    using State = AudioContextState;
-    State state() const;
-    bool isClosed() const { return m_state == State::Closed; }
-
-    bool wouldTaintOrigin(const URL&) const;
-
     // The AudioNode create methods are called on the main thread (from JavaScript).
-    ExceptionOr<Ref<AudioBufferSourceNode>> createBufferSource();
 #if ENABLE(VIDEO)
     ExceptionOr<Ref<MediaElementAudioSourceNode>> createMediaElementSource(HTMLMediaElement&);
 #endif
@@ -138,280 +59,21 @@ public:
     ExceptionOr<Ref<MediaStreamAudioSourceNode>> createMediaStreamSource(MediaStream&);
     ExceptionOr<Ref<MediaStreamAudioDestinationNode>> createMediaStreamDestination();
 #endif
-    ExceptionOr<Ref<GainNode>> createGain();
-    ExceptionOr<Ref<BiquadFilterNode>> createBiquadFilter();
-    ExceptionOr<Ref<WaveShaperNode>> createWaveShaper();
-    ExceptionOr<Ref<DelayNode>> createDelay(double maxDelayTime);
-    ExceptionOr<Ref<WebKitAudioPannerNode>> createPanner();
-    ExceptionOr<Ref<ConvolverNode>> createConvolver();
-    ExceptionOr<Ref<DynamicsCompressorNode>> createDynamicsCompressor();
-    ExceptionOr<Ref<AnalyserNode>> createAnalyser();
-    ExceptionOr<Ref<ScriptProcessorNode>> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels);
-    ExceptionOr<Ref<ChannelSplitterNode>> createChannelSplitter(size_t numberOfOutputs);
-    ExceptionOr<Ref<ChannelMergerNode>> createChannelMerger(size_t numberOfInputs);
-    ExceptionOr<Ref<OscillatorNode>> createOscillator();
-    ExceptionOr<Ref<PeriodicWave>> createPeriodicWave(Float32Array& real, Float32Array& imaginary);
-
-    // When a source node has no more processing to do (has finished playing), then it tells the context to dereference it.
-    void notifyNodeFinishedProcessing(AudioNode*) final;
-
-    // Called at the start of each render quantum.
-    void handlePreRenderTasks() final;
-
-    // Called at the end of each render quantum.
-    void handlePostRenderTasks() final;
-
-    // Called periodically at the end of each render quantum to dereference finished source nodes.
-    void derefFinishedSourceNodes();
-
-    // We schedule deletion of all marked nodes at the end of each realtime render quantum.
-    void markForDeletion(AudioNode&) final;
-    void deleteMarkedNodes() final;
-
-    // AudioContext can pull node(s) at the end of each render quantum even when they are not connected to any downstream nodes.
-    // These two methods are called by the nodes who want to add/remove themselves into/from the automatic pull lists.
-    void addAutomaticPullNode(AudioNode&) final;
-    void removeAutomaticPullNode(AudioNode&) final;
-
-    // Called right before handlePostRenderTasks() to handle nodes which need to be pulled even when they are not connected to anything.
-    void processAutomaticPullNodes(size_t framesToProcess) final;
-
-    // Keeps track of the number of connections made.
-    void incrementConnectionCount() final
-    {
-        ASSERT(isMainThread());
-        m_connectionCount++;
-    }
-
-    unsigned connectionCount() const { return m_connectionCount; }
-
-    //
-    // Thread Safety and Graph Locking:
-    //
-
-    void setAudioThread(Thread& thread) final { m_audioThread = &thread; } // FIXME: check either not initialized or the same
-    Thread* audioThread() const { return m_audioThread; }
-    bool isAudioThread() const final;
-
-    // Returns true only after the audio thread has been started and then shutdown.
-    bool isAudioThreadFinished() final { return m_isAudioThreadFinished; }
-
-    // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
-    void lock(bool& mustReleaseLock) final;
-
-    // Returns true if we own the lock.
-    // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
-    bool tryLock(bool& mustReleaseLock) final;
-
-    void unlock() final;
-
-    // Returns true if this thread owns the context's lock.
-    bool isGraphOwner() const final;
-
-    // Returns the maximum number of channels we can support.
-    static unsigned maxNumberOfChannels() { return MaxNumberOfChannels; }
-
-    // In AudioNode::deref() a tryLock() is used for calling finishDeref(), but if it fails keep track here.
-    void addDeferredFinishDeref(AudioNode*) final;
-
-    // In the audio thread at the start of each render cycle, we'll call handleDeferredFinishDerefs().
-    void handleDeferredFinishDerefs();
-
-    // Only accessed when the graph lock is held.
-    void markSummingJunctionDirty(AudioSummingJunction*) final;
-    void markAudioNodeOutputDirty(AudioNodeOutput*) final;
-
-    // Must be called on main thread.
-    void removeMarkedSummingJunction(AudioSummingJunction*) final;
-
-    // EventTarget
-    EventTargetInterface eventTargetInterface() const final;
-
-    void startRendering();
-    void finishedRendering(bool didRendering) final;
-
-    static unsigned s_hardwareContextCount;
-
-    // Restrictions to change default behaviors.
-    BehaviorRestrictions behaviorRestrictions() const final { return m_restrictions; }
-    void addBehaviorRestriction(BehaviorRestrictions restriction) final { m_restrictions |= restriction; }
-    void removeBehaviorRestriction(BehaviorRestrictions restriction) final { m_restrictions &= ~restriction; }
-
-    void isPlayingAudioDidChange() final;
-
-    void nodeWillBeginPlayback() final;
-
-#if !RELEASE_LOG_DISABLED
-    const Logger& logger() const final { return m_logger.get(); }
-    const void* logIdentifier() const final { return m_logIdentifier; }
-    WTFLogChannel& logChannel() const final;
-    const void* nextAudioNodeLogIdentifier() final { return childLogIdentifier(m_logIdentifier, ++m_nextAudioNodeIdentifier); }
-    const void* nextAudioParameterLogIdentifier() final { return childLogIdentifier(m_logIdentifier, ++m_nextAudioParameterIdentifier); }
-#endif
-
-    void postTask(WTF::Function<void()>&&) final;
-    bool isStopped() const final { return m_isStopScheduled; }
-    const SecurityOrigin* origin() const final;
-    void addConsoleMessage(MessageSource, MessageLevel, const String& message) final;
+    ExceptionOr<Ref<WebKitAudioPannerNode>> createWebKitPanner();
 
 protected:
     explicit WebKitAudioContext(Document&);
     WebKitAudioContext(Document&, AudioBuffer* renderTarget);
 
-    static bool isSampleRateRangeGood(float sampleRate);
-    void clearPendingActivity();
-    void makePendingActivity();
-
 private:
-    void constructCommon();
-
-    void lazyInitialize();
-    void uninitialize();
-
-    bool willBeginPlayback();
-    bool willPausePlayback();
-
-    bool userGestureRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequireUserGestureForAudioStartRestriction; }
-    bool pageConsentRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequirePageConsentForAudioStartRestriction; }
-
-    void setState(State);
-
-    void clear();
-
-    void scheduleNodeDeletion();
-
-    void mediaCanStart(Document&) override;
-
-    // EventTarget
-    void dispatchEvent(Event&) final;
-
-    // MediaProducer
-    MediaProducer::MediaStateFlags mediaState() const override;
-    void pageMutedStateDidChange() override;
-
-    // The context itself keeps a reference to all source nodes.  The source nodes then reference all nodes they're connected to.
-    // In turn, these nodes reference all nodes they're connected to.  All nodes are ultimately connected to the AudioDestinationNode.
-    // When the context dereferences a source node, it will be deactivated from the rendering graph along with all other nodes it is
-    // uniquely connected to.  See the AudioNode::ref() and AudioNode::deref() methods for more details.
-    void refNode(AudioNode&);
-    void derefNode(AudioNode&);
-
     // ActiveDOMObject API.
-    void suspend(ReasonForSuspension) final;
-    void resume() final;
-    void stop() override;
     const char* activeDOMObjectName() const override;
 
-    // When the context goes away, there might still be some sources which haven't finished playing.
-    // Make sure to dereference them here.
-    void derefUnfinishedSourceNodes();
-
-    // PlatformMediaSessionClient
-    PlatformMediaSession::MediaType mediaType() const override { return PlatformMediaSession::MediaType::WebAudio; }
-    PlatformMediaSession::MediaType presentationType() const override { return PlatformMediaSession::MediaType::WebAudio; }
-    void mayResumePlayback(bool shouldResume) override;
-    void suspendPlayback() override;
-    bool canReceiveRemoteControlCommands() const override { return false; }
-    void didReceiveRemoteControlCommand(PlatformMediaSession::RemoteControlCommandType, const PlatformMediaSession::RemoteCommandArgument*) override { }
-    bool supportsSeeking() const override { return false; }
-    bool shouldOverrideBackgroundPlaybackRestriction(PlatformMediaSession::InterruptionType) const override { return false; }
-    bool canProduceAudio() const final { return true; }
-    bool isSuspended() const final;
-
-    void visibilityStateChanged() final;
-
-    bool isBaseAudioContext() const final { return false; }
     bool isWebKitAudioContext() const final { return true; }
-
-    void handleDirtyAudioSummingJunctions();
-    void handleDirtyAudioNodeOutputs();
-
-    void addReaction(State, DOMPromiseDeferred<void>&&);
-    void updateAutomaticPullNodes();
-
-#if !RELEASE_LOG_DISABLED
-    const char* logClassName() const final { return "WebKitAudioContext"; }
-
-    Ref<Logger> m_logger;
-    const void* m_logIdentifier;
-    uint64_t m_nextAudioNodeIdentifier { 0 };
-    uint64_t m_nextAudioParameterIdentifier { 0 };
-#endif
-
-    // Only accessed in the audio thread.
-    Vector<AudioNode*> m_finishedNodes;
-
-    // We don't use RefPtr<AudioNode> here because AudioNode has a more complex ref() / deref() implementation
-    // with an optional argument for refType.  We need to use the special refType: RefTypeConnection
-    // Either accessed when the graph lock is held, or on the main thread when the audio thread has finished.
-    Vector<AudioNode*> m_referencedNodes;
-
-    // Accumulate nodes which need to be deleted here.
-    // This is copied to m_nodesToDelete at the end of a render cycle in handlePostRenderTasks(), where we're assured of a stable graph
-    // state which will have no references to any of the nodes in m_nodesToDelete once the context lock is released
-    // (when handlePostRenderTasks() has completed).
-    Vector<AudioNode*> m_nodesMarkedForDeletion;
-
-    // They will be scheduled for deletion (on the main thread) at the end of a render cycle (in realtime thread).
-    Vector<AudioNode*> m_nodesToDelete;
-
-    bool m_isDeletionScheduled { false };
-    bool m_isStopScheduled { false };
-    bool m_isInitialized { false };
-    bool m_isAudioThreadFinished { false };
-    bool m_automaticPullNodesNeedUpdating { false };
-    bool m_isOfflineContext { false };
-
-    // Only accessed when the graph lock is held.
-    HashSet<AudioSummingJunction*> m_dirtySummingJunctions;
-    HashSet<AudioNodeOutput*> m_dirtyAudioNodeOutputs;
-
-    // For the sake of thread safety, we maintain a separate Vector of automatic pull nodes for rendering in m_renderingAutomaticPullNodes.
-    // It will be copied from m_automaticPullNodes by updateAutomaticPullNodes() at the very start or end of the rendering quantum.
-    HashSet<AudioNode*> m_automaticPullNodes;
-    Vector<AudioNode*> m_renderingAutomaticPullNodes;
-    // Only accessed in the audio thread.
-    Vector<AudioNode*> m_deferredFinishDerefList;
-    Vector<Vector<DOMPromiseDeferred<void>>> m_stateReactions;
-
-    std::unique_ptr<PlatformMediaSession> m_mediaSession;
-    UniqueRef<MainThreadGenericEventQueue> m_eventQueue;
-
-    RefPtr<AudioBuffer> m_renderTarget;
-    RefPtr<AudioDestinationNode> m_destinationNode;
-    RefPtr<AudioListener> m_listener;
-
-    unsigned m_connectionCount { 0 };
-
-    // Graph locking.
-    Lock m_contextGraphMutex;
-    // FIXME: Using volatile seems incorrect.
-    // https://bugs.webkit.org/show_bug.cgi?id=180332
-    Thread* volatile m_audioThread { nullptr };
-    Thread* volatile m_graphOwnerThread { nullptr }; // if the lock is held then this is the thread which owns it, otherwise == nullptr.
-
-    std::unique_ptr<AsyncAudioDecoder> m_audioDecoder;
-
-    // This assumes 32 is large enough for multi-channel audio.
-    // It is somewhat arbitrary and could be increased if necessary.
-    enum { MaxNumberOfChannels = 32 };
-
-    // Number of AudioBufferSourceNodes that are active (playing).
-    std::atomic<int> m_activeSourceCount { 0 };
-
-    BehaviorRestrictions m_restrictions { NoRestrictions };
-
-    State m_state { State::Suspended };
-    RefPtr<PendingActivity<WebKitAudioContext>> m_pendingActivity;
 };
 
-inline AudioContextState WebKitAudioContext::state() const
-{
-    return m_state;
-}
-
 } // namespace WebCore
 
 SPECIALIZE_TYPE_TRAITS_BEGIN(WebCore::WebKitAudioContext)
-    static bool isType(const WebCore::AudioContextBase& context) { return context.isWebKitAudioContext(); }
+    static bool isType(const WebCore::BaseAudioContext& context) { return context.isWebKitAudioContext(); }
 SPECIALIZE_TYPE_TRAITS_END()
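SPECIALIZE_TYPE_TRAITS_BEGIN/END is what lets WebKit's generic is<>() and downcast<>() helpers use isWebKitAudioContext() as the discriminator when going from BaseAudioContext to WebKitAudioContext. Stripped of the macros, the mechanism reduces to roughly the following (a self-contained sketch with illustrative names, not WebKit's actual WTF/TypeCasts.h):

    #include <cassert>

    struct BaseContextSketch {
        virtual ~BaseContextSketch() = default;
        virtual bool isWebKitAudioContext() const { return false; }
    };

    struct WebKitContextSketch : BaseContextSketch {
        bool isWebKitAudioContext() const final { return true; }
    };

    // Each subtype specializes one predicate; is<>() and downcast<>()
    // are built on top of it.
    template<typename Target> bool isType(const BaseContextSketch&);

    template<> bool isType<WebKitContextSketch>(const BaseContextSketch& context)
    {
        return context.isWebKitAudioContext();
    }

    template<typename Target> Target& checkedDowncast(BaseContextSketch& context)
    {
        assert(isType<Target>(context)); // Like WebKit's downcast<>() check.
        return static_cast<Target&>(context);
    }

    int main()
    {
        WebKitContextSketch derived;
        BaseContextSketch& base = derived;
        WebKitContextSketch& recovered = checkedDowncast<WebKitContextSketch>(base);
        (void)recovered;
    }

Routing every checked cast through the one isType() specialization is why switching the discriminating base from AudioContextBase to BaseAudioContext, as this patch does, only touches this one spot.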
index 9b5a6ff..f6eea6c 100644
@@ -74,7 +74,7 @@
     [MayThrowException] DelayNode createDelay(optional unrestricted double maxDelayTime = 1);
     [MayThrowException] BiquadFilterNode createBiquadFilter();
     [MayThrowException] WaveShaperNode createWaveShaper();
-    [MayThrowException] WebKitAudioPannerNode createPanner();
+    [MayThrowException, ImplementedAs=createWebKitPanner] WebKitAudioPannerNode createPanner();
     [MayThrowException] ConvolverNode createConvolver();
     [MayThrowException] DynamicsCompressorNode createDynamicsCompressor();
     [MayThrowException] AnalyserNode createAnalyser();
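ImplementedAs decouples the Web-facing identifier from the C++ method name: JavaScript still sees createPanner(), but the binding now calls createWebKitPanner(), presumably to keep the legacy panner factory from clashing with the createPanner() that WebKitAudioContext now inherits from BaseAudioContext. A standalone analogue of that renaming (illustrative names; the real glue is generated from the IDL):

    #include <iostream>
    #include <string>

    // The base class already owns the natural name.
    struct BaseContext {
        std::string createPanner() { return "PannerNode"; }
    };

    struct LegacyContext : BaseContext {
        // Renamed implementation; the stable Web-facing name maps here.
        std::string createWebKitPanner() { return "WebKitAudioPannerNode"; }
    };

    // Plays the role of the generated binding: the caller-visible name
    // stays createPanner, the forwarded call uses the ImplementedAs name.
    std::string bindingCreatePanner(LegacyContext& context)
    {
        return context.createWebKitPanner();
    }

    int main()
    {
        LegacyContext context;
        std::cout << bindingCreatePanner(context) << '\n';
    }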
index e5ae5d6..1dd2f3e 100644
@@ -53,7 +53,6 @@ VideoTrackList conditional=VIDEO
 VisualViewport
 WebAnimation
 WebGPUDevice conditional=WEBGPU
-WebKitAudioContext conditional=WEB_AUDIO
 WebKitMediaKeySession conditional=LEGACY_ENCRYPTED_MEDIA
 WebSocket
 Worker
index 6721872..d52d38f 100644
@@ -4301,7 +4301,7 @@ void Internals::sendMediaControlEvent(MediaControlEvent event)
 #if ENABLE(WEB_AUDIO)
 void Internals::setAudioContextRestrictions(const Variant<RefPtr<BaseAudioContext>, RefPtr<WebKitAudioContext>>& contextVariant, StringView restrictionsString)
 {
-    RefPtr<AudioContextBase> context;
+    RefPtr<BaseAudioContext> context;
     switchOn(contextVariant, [&](RefPtr<BaseAudioContext> entry) {
         context = entry;
     }, [&](RefPtr<WebKitAudioContext> entry) {
@@ -4311,15 +4311,15 @@ void Internals::setAudioContextRestrictions(const Variant<RefPtr<BaseAudioContex
     auto restrictions = context->behaviorRestrictions();
     context->removeBehaviorRestriction(restrictions);
 
-    restrictions = AudioContextBase::NoRestrictions;
+    restrictions = BaseAudioContext::NoRestrictions;
 
     for (StringView restrictionString : restrictionsString.split(',')) {
         if (equalLettersIgnoringASCIICase(restrictionString, "norestrictions"))
-            restrictions |= AudioContextBase::NoRestrictions;
+            restrictions |= BaseAudioContext::NoRestrictions;
         if (equalLettersIgnoringASCIICase(restrictionString, "requireusergestureforaudiostart"))
-            restrictions |= AudioContextBase::RequireUserGestureForAudioStartRestriction;
+            restrictions |= BaseAudioContext::RequireUserGestureForAudioStartRestriction;
         if (equalLettersIgnoringASCIICase(restrictionString, "requirepageconsentforaudiostart"))
-            restrictions |= AudioContextBase::RequirePageConsentForAudioStartRestriction;
+            restrictions |= BaseAudioContext::RequirePageConsentForAudioStartRestriction;
     }
     context->addBehaviorRestriction(restrictions);
 }
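Internals::setAudioContextRestrictions() above is simple token-to-bitmask mapping: split the string on commas and OR the matching BaseAudioContext restriction flags together. The same logic, sketched standalone with std:: facilities in place of StringView (flag values illustrative):

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>

    // Illustrative stand-ins for BaseAudioContext's BehaviorRestrictions flags.
    enum BehaviorRestrictions : uint8_t {
        NoRestrictions = 0,
        RequireUserGestureForAudioStartRestriction = 1 << 0,
        RequirePageConsentForAudioStartRestriction = 1 << 1,
    };

    uint8_t parseRestrictions(const std::string& restrictionsString)
    {
        uint8_t restrictions = NoRestrictions;
        std::istringstream stream(restrictionsString);
        std::string token;
        while (std::getline(stream, token, ',')) {
            // WebKit compares case-insensitively; assume lowercase input here.
            if (token == "norestrictions")
                restrictions |= NoRestrictions;
            if (token == "requireusergestureforaudiostart")
                restrictions |= RequireUserGestureForAudioStartRestriction;
            if (token == "requirepageconsentforaudiostart")
                restrictions |= RequirePageConsentForAudioStartRestriction;
        }
        return restrictions;
    }

    int main()
    {
        std::cout << int(parseRestrictions("requireusergestureforaudiostart,requirepageconsentforaudiostart")) << '\n'; // 3
    }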