Introduce BaseAudioContext interface
authorcdumez@apple.com <cdumez@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 23 Jun 2020 03:25:56 +0000 (03:25 +0000)
committercdumez@apple.com <cdumez@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 23 Jun 2020 03:25:56 +0000 (03:25 +0000)
https://bugs.webkit.org/show_bug.cgi?id=213491

Reviewed by Geoffrey Garen.

LayoutTests/imported/w3c:

Rebaseline WPT test now that more checks are passing.

* web-platform-tests/webaudio/idlharness.https.window-expected.txt:

Source/WebCore:

Introduce BaseAudioContext interface as per W3C WebAudio specification:
- https://www.w3.org/TR/webaudio/#BaseAudioContext

No new tests, rebaselined existing test.

* CMakeLists.txt:
* DerivedSources-input.xcfilelist:
* DerivedSources-output.xcfilelist:
* DerivedSources.make:
* Modules/webaudio/AudioContext.cpp:
(WebCore::AudioContext::AudioContext):
(WebCore::AudioContext::close):
(WebCore::AudioContext::createMediaElementSource):
(WebCore::AudioContext::createMediaStreamSource):
(WebCore::AudioContext::createMediaStreamDestination):
* Modules/webaudio/AudioContext.h:
* Modules/webaudio/AudioContext.idl:
* Modules/webaudio/AudioNode.cpp:
(WebCore::AudioNode::contextForBindings const):
* Modules/webaudio/AudioNode.h:
* Modules/webaudio/AudioNode.idl:
* Modules/webaudio/BaseAudioContext.cpp: Copied from Source/WebCore/Modules/webaudio/AudioContext.cpp.
(WebCore::BaseAudioContext::isSampleRateRangeGood):
(WebCore::AudioContextBase::AudioContextBase):
(WebCore::BaseAudioContext::BaseAudioContext):
(WebCore::BaseAudioContext::constructCommon):
(WebCore::BaseAudioContext::~BaseAudioContext):
(WebCore::BaseAudioContext::lazyInitialize):
(WebCore::BaseAudioContext::clear):
(WebCore::BaseAudioContext::uninitialize):
(WebCore::BaseAudioContext::isInitialized const):
(WebCore::BaseAudioContext::addReaction):
(WebCore::BaseAudioContext::setState):
(WebCore::BaseAudioContext::stop):
(WebCore::BaseAudioContext::suspend):
(WebCore::BaseAudioContext::resume):
(WebCore::BaseAudioContext::activeDOMObjectName const):
(WebCore::AudioContextBase::document const):
(WebCore::BaseAudioContext::hostingDocumentIdentifier const):
(WebCore::BaseAudioContext::isSuspended const):
(WebCore::BaseAudioContext::visibilityStateChanged):
(WebCore::BaseAudioContext::wouldTaintOrigin const):
(WebCore::BaseAudioContext::createBuffer):
(WebCore::BaseAudioContext::decodeAudioData):
(WebCore::BaseAudioContext::createBufferSource):
(WebCore::BaseAudioContext::createScriptProcessor):
(WebCore::BaseAudioContext::createBiquadFilter):
(WebCore::BaseAudioContext::createWaveShaper):
(WebCore::BaseAudioContext::createPanner):
(WebCore::BaseAudioContext::createConvolver):
(WebCore::BaseAudioContext::createDynamicsCompressor):
(WebCore::BaseAudioContext::createAnalyser):
(WebCore::BaseAudioContext::createGain):
(WebCore::BaseAudioContext::createDelay):
(WebCore::BaseAudioContext::createChannelSplitter):
(WebCore::BaseAudioContext::createChannelMerger):
(WebCore::BaseAudioContext::createOscillator):
(WebCore::BaseAudioContext::createPeriodicWave):
(WebCore::BaseAudioContext::notifyNodeFinishedProcessing):
(WebCore::BaseAudioContext::derefFinishedSourceNodes):
(WebCore::BaseAudioContext::refNode):
(WebCore::BaseAudioContext::derefNode):
(WebCore::BaseAudioContext::derefUnfinishedSourceNodes):
(WebCore::BaseAudioContext::lock):
(WebCore::BaseAudioContext::tryLock):
(WebCore::BaseAudioContext::unlock):
(WebCore::BaseAudioContext::isAudioThread const):
(WebCore::BaseAudioContext::isGraphOwner const):
(WebCore::BaseAudioContext::addDeferredFinishDeref):
(WebCore::BaseAudioContext::handlePreRenderTasks):
(WebCore::BaseAudioContext::handlePostRenderTasks):
(WebCore::BaseAudioContext::handleDeferredFinishDerefs):
(WebCore::BaseAudioContext::markForDeletion):
(WebCore::BaseAudioContext::scheduleNodeDeletion):
(WebCore::BaseAudioContext::deleteMarkedNodes):
(WebCore::BaseAudioContext::markSummingJunctionDirty):
(WebCore::BaseAudioContext::removeMarkedSummingJunction):
(WebCore::BaseAudioContext::eventTargetInterface const):
(WebCore::BaseAudioContext::markAudioNodeOutputDirty):
(WebCore::BaseAudioContext::handleDirtyAudioSummingJunctions):
(WebCore::BaseAudioContext::handleDirtyAudioNodeOutputs):
(WebCore::BaseAudioContext::addAutomaticPullNode):
(WebCore::BaseAudioContext::removeAutomaticPullNode):
(WebCore::BaseAudioContext::updateAutomaticPullNodes):
(WebCore::BaseAudioContext::processAutomaticPullNodes):
(WebCore::AudioContextBase::scriptExecutionContext const):
(WebCore::BaseAudioContext::nodeWillBeginPlayback):
(WebCore::shouldDocumentAllowWebAudioToAutoPlay):
(WebCore::BaseAudioContext::willBeginPlayback):
(WebCore::BaseAudioContext::willPausePlayback):
(WebCore::BaseAudioContext::startRendering):
(WebCore::BaseAudioContext::mediaCanStart):
(WebCore::BaseAudioContext::mediaState const):
(WebCore::BaseAudioContext::pageMutedStateDidChange):
(WebCore::BaseAudioContext::isPlayingAudioDidChange):
(WebCore::BaseAudioContext::finishedRendering):
(WebCore::BaseAudioContext::dispatchEvent):
(WebCore::BaseAudioContext::incrementActiveSourceCount):
(WebCore::BaseAudioContext::decrementActiveSourceCount):
(WebCore::BaseAudioContext::suspendRendering):
(WebCore::BaseAudioContext::resumeRendering):
(WebCore::BaseAudioContext::suspendPlayback):
(WebCore::BaseAudioContext::mayResumePlayback):
(WebCore::BaseAudioContext::postTask):
(WebCore::BaseAudioContext::origin const):
(WebCore::BaseAudioContext::addConsoleMessage):
(WebCore::BaseAudioContext::clearPendingActivity):
(WebCore::BaseAudioContext::makePendingActivity):
(WebCore::BaseAudioContext::logChannel const):
* Modules/webaudio/BaseAudioContext.h: Copied from Source/WebCore/Modules/webaudio/AudioContext.h.
(WebCore::AudioContextBase::AutoLocker::AutoLocker):
(WebCore::AudioContextBase::AutoLocker::~AutoLocker):
(WebCore::BaseAudioContext::destination):
(WebCore::BaseAudioContext::activeSourceCount const):
(WebCore::BaseAudioContext::listener):
(WebCore::BaseAudioContext::state const):
(WebCore::BaseAudioContext::isClosed const):
(WebCore::BaseAudioContext::connectionCount const):
(WebCore::BaseAudioContext::audioThread const):
(WebCore::BaseAudioContext::maxNumberOfChannels):
(WebCore::BaseAudioContext::destinationNode const):
(WebCore::BaseAudioContext::userGestureRequiredForAudioStart const):
(WebCore::BaseAudioContext::pageConsentRequiredForAudioStart const):
(isType):
* Modules/webaudio/BaseAudioContext.idl: Copied from Source/WebCore/Modules/webaudio/AudioContext.idl.
* Modules/webaudio/OfflineAudioContext.cpp:
(WebCore::OfflineAudioContext::OfflineAudioContext):
* Modules/webaudio/OfflineAudioContext.h:
* Modules/webaudio/OfflineAudioContext.idl:
* Modules/webaudio/PannerNode.cpp:
(WebCore::PannerNode::PannerNode):
* Modules/webaudio/PannerNode.h:
* Sources.txt:
* WebCore.xcodeproj/project.pbxproj:
* bindings/js/WebCoreBuiltinNames.h:
* dom/EventTargetFactory.in:
* testing/Internals.cpp:
(WebCore::Internals::setAudioContextRestrictions):
* testing/Internals.h:
* testing/Internals.idl:

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@263381 268f45cc-cd09-0410-ab3c-d52691b4dbfc

29 files changed:
LayoutTests/imported/w3c/ChangeLog
LayoutTests/imported/w3c/web-platform-tests/webaudio/idlharness.https.window-expected.txt
Source/WebCore/CMakeLists.txt
Source/WebCore/ChangeLog
Source/WebCore/DerivedSources-input.xcfilelist
Source/WebCore/DerivedSources-output.xcfilelist
Source/WebCore/DerivedSources.make
Source/WebCore/Modules/webaudio/AudioContext.cpp
Source/WebCore/Modules/webaudio/AudioContext.h
Source/WebCore/Modules/webaudio/AudioContext.idl
Source/WebCore/Modules/webaudio/AudioNode.cpp
Source/WebCore/Modules/webaudio/AudioNode.h
Source/WebCore/Modules/webaudio/AudioNode.idl
Source/WebCore/Modules/webaudio/BaseAudioContext.cpp [new file with mode: 0644]
Source/WebCore/Modules/webaudio/BaseAudioContext.h [new file with mode: 0644]
Source/WebCore/Modules/webaudio/BaseAudioContext.idl [new file with mode: 0644]
Source/WebCore/Modules/webaudio/OfflineAudioContext.cpp
Source/WebCore/Modules/webaudio/OfflineAudioContext.h
Source/WebCore/Modules/webaudio/OfflineAudioContext.idl
Source/WebCore/Modules/webaudio/PannerNode.cpp
Source/WebCore/Modules/webaudio/PannerNode.h
Source/WebCore/Modules/webaudio/WebKitAudioContext.h
Source/WebCore/Sources.txt
Source/WebCore/WebCore.xcodeproj/project.pbxproj
Source/WebCore/bindings/js/WebCoreBuiltinNames.h
Source/WebCore/dom/EventTargetFactory.in
Source/WebCore/testing/Internals.cpp
Source/WebCore/testing/Internals.h
Source/WebCore/testing/Internals.idl

index a7e241b..6dd2995 100644 (file)
@@ -1,3 +1,14 @@
+2020-06-22  Chris Dumez  <cdumez@apple.com>
+
+        Introduce BaseAudioContext interface
+        https://bugs.webkit.org/show_bug.cgi?id=213491
+
+        Reviewed by Geoffrey Garen.
+
+        Rebaseline WPT test now that more checks are passing.
+
+        * web-platform-tests/webaudio/idlharness.https.window-expected.txt:
+
 2020-06-22  Clark Wang  <clark_wang@apple.com>
 
         Added getFloatTimeDomainData method to AnalyserNode
index c68a4cb..cd94d57 100644 (file)
@@ -9,42 +9,42 @@ PASS Element includes ParentNode: member names are unique
 PASS Element includes NonDocumentTypeChildNode: member names are unique 
 PASS Element includes ChildNode: member names are unique 
 PASS Element includes Slottable: member names are unique 
-FAIL BaseAudioContext interface: existence and properties of interface object assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface object length assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface object name assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: existence and properties of interface prototype object assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: existence and properties of interface prototype object's "constructor" property assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: existence and properties of interface prototype object's @@unscopables property assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: attribute destination assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: attribute sampleRate assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: attribute currentTime assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: attribute listener assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: attribute state assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: attribute audioWorklet assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: attribute onstatechange assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createAnalyser() assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createBiquadFilter() assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createBuffer(unsigned long, unsigned long, float) assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createBufferSource() assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createChannelMerger(optional unsigned long) assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createChannelSplitter(optional unsigned long) assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createConstantSource() assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createConvolver() assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createDelay(optional double) assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createDynamicsCompressor() assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createGain() assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createIIRFilter(sequence<double>, sequence<double>) assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createOscillator() assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createPanner() assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createPeriodicWave(sequence<float>, sequence<float>, optional PeriodicWaveConstraints) assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createScriptProcessor(optional unsigned long, optional unsigned long, optional unsigned long) assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createStereoPanner() assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation createWaveShaper() assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL BaseAudioContext interface: operation decodeAudioData(ArrayBuffer, optional DecodeSuccessCallback?, optional DecodeErrorCallback?) assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
-FAIL AudioContext interface: existence and properties of interface object assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
+PASS BaseAudioContext interface: existence and properties of interface object 
+PASS BaseAudioContext interface object length 
+PASS BaseAudioContext interface object name 
+PASS BaseAudioContext interface: existence and properties of interface prototype object 
+PASS BaseAudioContext interface: existence and properties of interface prototype object's "constructor" property 
+PASS BaseAudioContext interface: existence and properties of interface prototype object's @@unscopables property 
+PASS BaseAudioContext interface: attribute destination 
+PASS BaseAudioContext interface: attribute sampleRate 
+PASS BaseAudioContext interface: attribute currentTime 
+PASS BaseAudioContext interface: attribute listener 
+PASS BaseAudioContext interface: attribute state 
+FAIL BaseAudioContext interface: attribute audioWorklet assert_true: The prototype object must have a property "audioWorklet" expected true got false
+PASS BaseAudioContext interface: attribute onstatechange 
+PASS BaseAudioContext interface: operation createAnalyser() 
+PASS BaseAudioContext interface: operation createBiquadFilter() 
+FAIL BaseAudioContext interface: operation createBuffer(unsigned long, unsigned long, float) assert_equals: property has wrong .length expected 3 but got 2
+PASS BaseAudioContext interface: operation createBufferSource() 
+PASS BaseAudioContext interface: operation createChannelMerger(optional unsigned long) 
+PASS BaseAudioContext interface: operation createChannelSplitter(optional unsigned long) 
+FAIL BaseAudioContext interface: operation createConstantSource() assert_own_property: interface prototype object missing non-static operation expected property "createConstantSource" missing
+PASS BaseAudioContext interface: operation createConvolver() 
+PASS BaseAudioContext interface: operation createDelay(optional double) 
+PASS BaseAudioContext interface: operation createDynamicsCompressor() 
+PASS BaseAudioContext interface: operation createGain() 
+FAIL BaseAudioContext interface: operation createIIRFilter(sequence<double>, sequence<double>) assert_own_property: interface prototype object missing non-static operation expected property "createIIRFilter" missing
+PASS BaseAudioContext interface: operation createOscillator() 
+PASS BaseAudioContext interface: operation createPanner() 
+PASS BaseAudioContext interface: operation createPeriodicWave(sequence<float>, sequence<float>, optional PeriodicWaveConstraints) 
+PASS BaseAudioContext interface: operation createScriptProcessor(optional unsigned long, optional unsigned long, optional unsigned long) 
+FAIL BaseAudioContext interface: operation createStereoPanner() assert_own_property: interface prototype object missing non-static operation expected property "createStereoPanner" missing
+PASS BaseAudioContext interface: operation createWaveShaper() 
+FAIL BaseAudioContext interface: operation decodeAudioData(ArrayBuffer, optional DecodeSuccessCallback?, optional DecodeErrorCallback?) assert_equals: property has wrong .length expected 1 but got 2
+PASS AudioContext interface: existence and properties of interface object 
 PASS AudioContext interface object length 
 PASS AudioContext interface object name 
-FAIL AudioContext interface: existence and properties of interface prototype object assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
+PASS AudioContext interface: existence and properties of interface prototype object 
 PASS AudioContext interface: existence and properties of interface prototype object's "constructor" property 
 PASS AudioContext interface: existence and properties of interface prototype object's @@unscopables property 
 FAIL AudioContext interface: attribute baseLatency assert_true: The prototype object must have a property "baseLatency" expected true got false
@@ -106,17 +106,17 @@ FAIL BaseAudioContext interface: context must inherit property "createStereoPann
 PASS BaseAudioContext interface: context must inherit property "createWaveShaper()" with the proper type 
 PASS BaseAudioContext interface: context must inherit property "decodeAudioData(ArrayBuffer, optional DecodeSuccessCallback?, optional DecodeErrorCallback?)" with the proper type 
 FAIL BaseAudioContext interface: calling decodeAudioData(ArrayBuffer, optional DecodeSuccessCallback?, optional DecodeErrorCallback?) on context with too few arguments must throw TypeError assert_unreached: Throws "TypeError: Not enough arguments" instead of rejecting promise Reached unreachable code
-FAIL OfflineAudioContext interface: existence and properties of interface object assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
+PASS OfflineAudioContext interface: existence and properties of interface object 
 FAIL OfflineAudioContext interface object length assert_equals: wrong value for OfflineAudioContext.length expected 1 but got 3
 PASS OfflineAudioContext interface object name 
-FAIL OfflineAudioContext interface: existence and properties of interface prototype object assert_own_property: self does not have own property "BaseAudioContext" expected property "BaseAudioContext" missing
+PASS OfflineAudioContext interface: existence and properties of interface prototype object 
 PASS OfflineAudioContext interface: existence and properties of interface prototype object's "constructor" property 
 PASS OfflineAudioContext interface: existence and properties of interface prototype object's @@unscopables property 
-FAIL OfflineAudioContext interface: operation startRendering() assert_own_property: interface prototype object missing non-static operation expected property "startRendering" missing
-FAIL OfflineAudioContext interface: operation resume() assert_own_property: interface prototype object missing non-static operation expected property "resume" missing
-FAIL OfflineAudioContext interface: operation suspend(double) assert_own_property: interface prototype object missing non-static operation expected property "suspend" missing
+FAIL OfflineAudioContext interface: operation startRendering() assert_unreached: Throws "TypeError: Can only call OfflineAudioContext.startRendering on instances of OfflineAudioContext" instead of rejecting promise Reached unreachable code
+PASS OfflineAudioContext interface: operation resume() 
+FAIL OfflineAudioContext interface: operation suspend(double) assert_equals: property has wrong .length expected 1 but got 0
 FAIL OfflineAudioContext interface: attribute length assert_true: The prototype object must have a property "length" expected true got false
-FAIL OfflineAudioContext interface: attribute oncomplete assert_own_property: expected property "oncomplete" missing
+PASS OfflineAudioContext interface: attribute oncomplete 
 PASS OfflineAudioContext must be primary interface of new OfflineAudioContext(1, 1, sample_rate) 
 PASS Stringification of new OfflineAudioContext(1, 1, sample_rate) 
 PASS OfflineAudioContext interface: new OfflineAudioContext(1, 1, sample_rate) must inherit property "startRendering()" with the proper type 
index 399a6bd..0227f80 100644 (file)
@@ -439,6 +439,7 @@ set(WebCore_NON_SVG_IDL_FILES
     Modules/webaudio/AudioNode.idl
     Modules/webaudio/AudioParam.idl
     Modules/webaudio/AudioProcessingEvent.idl
+    Modules/webaudio/BaseAudioContext.idl
     Modules/webaudio/BiquadFilterNode.idl
     Modules/webaudio/ChannelMergerNode.idl
     Modules/webaudio/ChannelSplitterNode.idl
index b799537..ffa1101 100644 (file)
@@ -1,3 +1,151 @@
+2020-06-22  Chris Dumez  <cdumez@apple.com>
+
+        Introduce BaseAudioContext interface
+        https://bugs.webkit.org/show_bug.cgi?id=213491
+
+        Reviewed by Geoffrey Garen.
+
+        Introduce BaseAudioContext interface as per W3C WebAudio specification:
+        - https://www.w3.org/TR/webaudio/#BaseAudioContext
+
+        No new tests, rebaselined existing test.
+
+        * CMakeLists.txt:
+        * DerivedSources-input.xcfilelist:
+        * DerivedSources-output.xcfilelist:
+        * DerivedSources.make:
+        * Modules/webaudio/AudioContext.cpp:
+        (WebCore::AudioContext::AudioContext):
+        (WebCore::AudioContext::close):
+        (WebCore::AudioContext::createMediaElementSource):
+        (WebCore::AudioContext::createMediaStreamSource):
+        (WebCore::AudioContext::createMediaStreamDestination):
+        * Modules/webaudio/AudioContext.h:
+        * Modules/webaudio/AudioContext.idl:
+        * Modules/webaudio/AudioNode.cpp:
+        (WebCore::AudioNode::contextForBindings const):
+        * Modules/webaudio/AudioNode.h:
+        * Modules/webaudio/AudioNode.idl:
+        * Modules/webaudio/BaseAudioContext.cpp: Copied from Source/WebCore/Modules/webaudio/AudioContext.cpp.
+        (WebCore::BaseAudioContext::isSampleRateRangeGood):
+        (WebCore::AudioContextBase::AudioContextBase):
+        (WebCore::BaseAudioContext::BaseAudioContext):
+        (WebCore::BaseAudioContext::constructCommon):
+        (WebCore::BaseAudioContext::~BaseAudioContext):
+        (WebCore::BaseAudioContext::lazyInitialize):
+        (WebCore::BaseAudioContext::clear):
+        (WebCore::BaseAudioContext::uninitialize):
+        (WebCore::BaseAudioContext::isInitialized const):
+        (WebCore::BaseAudioContext::addReaction):
+        (WebCore::BaseAudioContext::setState):
+        (WebCore::BaseAudioContext::stop):
+        (WebCore::BaseAudioContext::suspend):
+        (WebCore::BaseAudioContext::resume):
+        (WebCore::BaseAudioContext::activeDOMObjectName const):
+        (WebCore::AudioContextBase::document const):
+        (WebCore::BaseAudioContext::hostingDocumentIdentifier const):
+        (WebCore::BaseAudioContext::isSuspended const):
+        (WebCore::BaseAudioContext::visibilityStateChanged):
+        (WebCore::BaseAudioContext::wouldTaintOrigin const):
+        (WebCore::BaseAudioContext::createBuffer):
+        (WebCore::BaseAudioContext::decodeAudioData):
+        (WebCore::BaseAudioContext::createBufferSource):
+        (WebCore::BaseAudioContext::createScriptProcessor):
+        (WebCore::BaseAudioContext::createBiquadFilter):
+        (WebCore::BaseAudioContext::createWaveShaper):
+        (WebCore::BaseAudioContext::createPanner):
+        (WebCore::BaseAudioContext::createConvolver):
+        (WebCore::BaseAudioContext::createDynamicsCompressor):
+        (WebCore::BaseAudioContext::createAnalyser):
+        (WebCore::BaseAudioContext::createGain):
+        (WebCore::BaseAudioContext::createDelay):
+        (WebCore::BaseAudioContext::createChannelSplitter):
+        (WebCore::BaseAudioContext::createChannelMerger):
+        (WebCore::BaseAudioContext::createOscillator):
+        (WebCore::BaseAudioContext::createPeriodicWave):
+        (WebCore::BaseAudioContext::notifyNodeFinishedProcessing):
+        (WebCore::BaseAudioContext::derefFinishedSourceNodes):
+        (WebCore::BaseAudioContext::refNode):
+        (WebCore::BaseAudioContext::derefNode):
+        (WebCore::BaseAudioContext::derefUnfinishedSourceNodes):
+        (WebCore::BaseAudioContext::lock):
+        (WebCore::BaseAudioContext::tryLock):
+        (WebCore::BaseAudioContext::unlock):
+        (WebCore::BaseAudioContext::isAudioThread const):
+        (WebCore::BaseAudioContext::isGraphOwner const):
+        (WebCore::BaseAudioContext::addDeferredFinishDeref):
+        (WebCore::BaseAudioContext::handlePreRenderTasks):
+        (WebCore::BaseAudioContext::handlePostRenderTasks):
+        (WebCore::BaseAudioContext::handleDeferredFinishDerefs):
+        (WebCore::BaseAudioContext::markForDeletion):
+        (WebCore::BaseAudioContext::scheduleNodeDeletion):
+        (WebCore::BaseAudioContext::deleteMarkedNodes):
+        (WebCore::BaseAudioContext::markSummingJunctionDirty):
+        (WebCore::BaseAudioContext::removeMarkedSummingJunction):
+        (WebCore::BaseAudioContext::eventTargetInterface const):
+        (WebCore::BaseAudioContext::markAudioNodeOutputDirty):
+        (WebCore::BaseAudioContext::handleDirtyAudioSummingJunctions):
+        (WebCore::BaseAudioContext::handleDirtyAudioNodeOutputs):
+        (WebCore::BaseAudioContext::addAutomaticPullNode):
+        (WebCore::BaseAudioContext::removeAutomaticPullNode):
+        (WebCore::BaseAudioContext::updateAutomaticPullNodes):
+        (WebCore::BaseAudioContext::processAutomaticPullNodes):
+        (WebCore::AudioContextBase::scriptExecutionContext const):
+        (WebCore::BaseAudioContext::nodeWillBeginPlayback):
+        (WebCore::shouldDocumentAllowWebAudioToAutoPlay):
+        (WebCore::BaseAudioContext::willBeginPlayback):
+        (WebCore::BaseAudioContext::willPausePlayback):
+        (WebCore::BaseAudioContext::startRendering):
+        (WebCore::BaseAudioContext::mediaCanStart):
+        (WebCore::BaseAudioContext::mediaState const):
+        (WebCore::BaseAudioContext::pageMutedStateDidChange):
+        (WebCore::BaseAudioContext::isPlayingAudioDidChange):
+        (WebCore::BaseAudioContext::finishedRendering):
+        (WebCore::BaseAudioContext::dispatchEvent):
+        (WebCore::BaseAudioContext::incrementActiveSourceCount):
+        (WebCore::BaseAudioContext::decrementActiveSourceCount):
+        (WebCore::BaseAudioContext::suspendRendering):
+        (WebCore::BaseAudioContext::resumeRendering):
+        (WebCore::BaseAudioContext::suspendPlayback):
+        (WebCore::BaseAudioContext::mayResumePlayback):
+        (WebCore::BaseAudioContext::postTask):
+        (WebCore::BaseAudioContext::origin const):
+        (WebCore::BaseAudioContext::addConsoleMessage):
+        (WebCore::BaseAudioContext::clearPendingActivity):
+        (WebCore::BaseAudioContext::makePendingActivity):
+        (WebCore::BaseAudioContext::logChannel const):
+        * Modules/webaudio/BaseAudioContext.h: Copied from Source/WebCore/Modules/webaudio/AudioContext.h.
+        (WebCore::AudioContextBase::AutoLocker::AutoLocker):
+        (WebCore::AudioContextBase::AutoLocker::~AutoLocker):
+        (WebCore::BaseAudioContext::destination):
+        (WebCore::BaseAudioContext::activeSourceCount const):
+        (WebCore::BaseAudioContext::listener):
+        (WebCore::BaseAudioContext::state const):
+        (WebCore::BaseAudioContext::isClosed const):
+        (WebCore::BaseAudioContext::connectionCount const):
+        (WebCore::BaseAudioContext::audioThread const):
+        (WebCore::BaseAudioContext::maxNumberOfChannels):
+        (WebCore::BaseAudioContext::destinationNode const):
+        (WebCore::BaseAudioContext::userGestureRequiredForAudioStart const):
+        (WebCore::BaseAudioContext::pageConsentRequiredForAudioStart const):
+        (isType):
+        * Modules/webaudio/BaseAudioContext.idl: Copied from Source/WebCore/Modules/webaudio/AudioContext.idl.
+        * Modules/webaudio/OfflineAudioContext.cpp:
+        (WebCore::OfflineAudioContext::OfflineAudioContext):
+        * Modules/webaudio/OfflineAudioContext.h:
+        * Modules/webaudio/OfflineAudioContext.idl:
+        * Modules/webaudio/PannerNode.cpp:
+        (WebCore::PannerNode::PannerNode):
+        * Modules/webaudio/PannerNode.h:
+        * Sources.txt:
+        * WebCore.xcodeproj/project.pbxproj:
+        * bindings/js/WebCoreBuiltinNames.h:
+        * dom/EventTargetFactory.in:
+        * testing/Internals.cpp:
+        (WebCore::Internals::setAudioContextRestrictions):
+        * testing/Internals.h:
+        * testing/Internals.idl:
+
 2020-06-22  Andres Gonzalez  <andresg_22@apple.com>
 
         AXIsolatedTree::generateSubtree should properly assign the generated subtree to its parent node.
index f7d64c1..b47d257 100644 (file)
@@ -306,6 +306,7 @@ $(PROJECT_DIR)/Modules/webaudio/AudioListener.idl
 $(PROJECT_DIR)/Modules/webaudio/AudioNode.idl
 $(PROJECT_DIR)/Modules/webaudio/AudioParam.idl
 $(PROJECT_DIR)/Modules/webaudio/AudioProcessingEvent.idl
+$(PROJECT_DIR)/Modules/webaudio/BaseAudioContext.idl
 $(PROJECT_DIR)/Modules/webaudio/BiquadFilterNode.idl
 $(PROJECT_DIR)/Modules/webaudio/ChannelMergerNode.idl
 $(PROJECT_DIR)/Modules/webaudio/ChannelSplitterNode.idl
index cbc648a..551cdb1 100644 (file)
@@ -187,6 +187,8 @@ $(BUILT_PRODUCTS_DIR)/DerivedSources/WebCore/JSAuthenticatorTransport.cpp
 $(BUILT_PRODUCTS_DIR)/DerivedSources/WebCore/JSAuthenticatorTransport.h
 $(BUILT_PRODUCTS_DIR)/DerivedSources/WebCore/JSBarProp.cpp
 $(BUILT_PRODUCTS_DIR)/DerivedSources/WebCore/JSBarProp.h
+$(BUILT_PRODUCTS_DIR)/DerivedSources/WebCore/JSBaseAudioContext.cpp
+$(BUILT_PRODUCTS_DIR)/DerivedSources/WebCore/JSBaseAudioContext.h
 $(BUILT_PRODUCTS_DIR)/DerivedSources/WebCore/JSBasicCredential.cpp
 $(BUILT_PRODUCTS_DIR)/DerivedSources/WebCore/JSBasicCredential.h
 $(BUILT_PRODUCTS_DIR)/DerivedSources/WebCore/JSBeforeLoadEvent.cpp
index b27c79c..f3f9a41 100644 (file)
@@ -389,6 +389,7 @@ JS_BINDING_IDLS = \
     $(WebCore)/Modules/webaudio/AudioNode.idl \
     $(WebCore)/Modules/webaudio/AudioParam.idl \
     $(WebCore)/Modules/webaudio/AudioProcessingEvent.idl \
+    $(WebCore)/Modules/webaudio/BaseAudioContext.idl \
     $(WebCore)/Modules/webaudio/BiquadFilterNode.idl \
     $(WebCore)/Modules/webaudio/ChannelMergerNode.idl \
     $(WebCore)/Modules/webaudio/ChannelSplitterNode.idl \
index 26e1fc7..383e1fd 100644 (file)
 #if ENABLE(WEB_AUDIO)
 
 #include "AudioContext.h"
-
-#include "AnalyserNode.h"
-#include "AsyncAudioDecoder.h"
-#include "AudioBuffer.h"
-#include "AudioBufferCallback.h"
-#include "AudioBufferSourceNode.h"
-#include "AudioListener.h"
-#include "AudioNodeInput.h"
-#include "AudioNodeOutput.h"
-#include "AudioSession.h"
-#include "BiquadFilterNode.h"
-#include "ChannelMergerNode.h"
-#include "ChannelSplitterNode.h"
-#include "ConvolverNode.h"
-#include "DefaultAudioDestinationNode.h"
-#include "DelayNode.h"
-#include "Document.h"
-#include "DynamicsCompressorNode.h"
-#include "EventNames.h"
-#include "FFTFrame.h"
-#include "Frame.h"
-#include "FrameLoader.h"
-#include "GainNode.h"
-#include "GenericEventQueue.h"
-#include "HRTFDatabaseLoader.h"
-#include "HRTFPanner.h"
 #include "JSDOMPromiseDeferred.h"
-#include "Logging.h"
-#include "NetworkingContext.h"
-#include "OfflineAudioCompletionEvent.h"
-#include "OfflineAudioDestinationNode.h"
-#include "OscillatorNode.h"
-#include "Page.h"
-#include "PannerNode.h"
-#include "PeriodicWave.h"
-#include "PlatformMediaSessionManager.h"
-#include "ScriptController.h"
-#include "ScriptProcessorNode.h"
-#include "WaveShaperNode.h"
-#include <JavaScriptCore/ScriptCallStack.h>
 
 #if ENABLE(MEDIA_STREAM)
 #include "MediaStream.h"
 #include "MediaElementAudioSourceNode.h"
 #endif
 
-#if DEBUG_AUDIONODE_REFERENCES
-#include <stdio.h>
-#endif
-
-#if USE(GSTREAMER)
-#include "GStreamerCommon.h"
-#endif
-
-#if PLATFORM(IOS_FAMILY)
-#include "ScriptController.h"
-#include "Settings.h"
-#endif
-
-#include <JavaScriptCore/ArrayBuffer.h>
-#include <wtf/Atomics.h>
-#include <wtf/IsoMallocInlines.h>
-#include <wtf/MainThread.h>
-#include <wtf/Ref.h>
-#include <wtf/RefCounted.h>
-#include <wtf/Scope.h>
-#include <wtf/text/WTFString.h>
-
-const unsigned MaxPeriodicWaveLength = 4096;
-
 namespace WebCore {
 
-WTF_MAKE_ISO_ALLOCATED_IMPL(AudioContextBase);
-WTF_MAKE_ISO_ALLOCATED_IMPL(AudioContext);
-
-#define RELEASE_LOG_IF_ALLOWED(fmt, ...) RELEASE_LOG_IF(document() && document()->page() && document()->page()->isAlwaysOnLoggingAllowed(), Media, "%p - AudioContext::" fmt, this, ##__VA_ARGS__)
-    
-bool AudioContext::isSampleRateRangeGood(float sampleRate)
-{
-    // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
-    // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
-    return sampleRate >= 44100 && sampleRate <= 96000;
-}
-
 #if OS(WINDOWS)
 // Don't allow more than this number of simultaneous AudioContexts talking to hardware.
 constexpr unsigned maxHardwareContexts = 4;
 #endif
-unsigned AudioContext::s_hardwareContextCount = 0;
+
+WTF_MAKE_ISO_ALLOCATED_IMPL(AudioContext);
     
 ExceptionOr<Ref<AudioContext>> AudioContext::create(Document& document)
 {
@@ -138,326 +64,38 @@ ExceptionOr<Ref<AudioContext>> AudioContext::create(Document& document)
     return audioContext;
 }
 
-AudioContextBase::AudioContextBase(Document& document)
-    : ActiveDOMObject(document)
-{
-}
-
 // Constructor for rendering to the audio hardware.
 AudioContext::AudioContext(Document& document)
-    : AudioContextBase(document)
-#if !RELEASE_LOG_DISABLED
-    , m_logger(document.logger())
-    , m_logIdentifier(uniqueLogIdentifier())
-#endif
-    , m_mediaSession(PlatformMediaSession::create(PlatformMediaSessionManager::sharedManager(), *this))
-    , m_eventQueue(MainThreadGenericEventQueue::create(*this))
+    : BaseAudioContext(document)
 {
-    // According to spec AudioContext must die only after page navigate.
-    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
-    makePendingActivity();
-
-    constructCommon();
-
-    m_destinationNode = DefaultAudioDestinationNode::create(*this);
-
-    // Initialize the destination node's muted state to match the page's current muted state.
-    pageMutedStateDidChange();
-
-    document.addAudioProducer(*this);
-    document.registerForVisibilityStateChangedCallbacks(*this);
 }
 
 // Constructor for offline (non-realtime) rendering.
 AudioContext::AudioContext(Document& document, AudioBuffer* renderTarget)
-    : AudioContextBase(document)
-#if !RELEASE_LOG_DISABLED
-    , m_logger(document.logger())
-    , m_logIdentifier(uniqueLogIdentifier())
-#endif
-    , m_isOfflineContext(true)
-    , m_mediaSession(PlatformMediaSession::create(PlatformMediaSessionManager::sharedManager(), *this))
-    , m_eventQueue(MainThreadGenericEventQueue::create(*this))
-    , m_renderTarget(renderTarget)
-{
-    constructCommon();
-
-    // Create a new destination for offline rendering.
-    m_destinationNode = OfflineAudioDestinationNode::create(*this, m_renderTarget.get());
-}
-
-void AudioContext::constructCommon()
-{
-    FFTFrame::initialize();
-    
-    m_listener = AudioListener::create();
-
-    ASSERT(document());
-    if (document()->audioPlaybackRequiresUserGesture())
-        addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
-    else
-        m_restrictions = NoRestrictions;
-
-#if PLATFORM(COCOA)
-    addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
-#endif
-}
-
-AudioContext::~AudioContext()
-{
-#if DEBUG_AUDIONODE_REFERENCES
-    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
-#endif
-    ASSERT(!m_isInitialized);
-    ASSERT(m_isStopScheduled);
-    ASSERT(m_nodesToDelete.isEmpty());
-    ASSERT(m_referencedNodes.isEmpty());
-    ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion fails on tests sometimes.
-    ASSERT(m_automaticPullNodes.isEmpty());
-    if (m_automaticPullNodesNeedUpdating)
-        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
-    ASSERT(m_renderingAutomaticPullNodes.isEmpty());
-    // FIXME: Can we assert that m_deferredFinishDerefList is empty?
-
-    if (!isOfflineContext() && scriptExecutionContext()) {
-        document()->removeAudioProducer(*this);
-        document()->unregisterForVisibilityStateChangedCallbacks(*this);
-    }
-}
-
-void AudioContext::lazyInitialize()
-{
-    ASSERT(!m_isStopScheduled);
-
-    if (m_isInitialized)
-        return;
-
-    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
-    ASSERT(!m_isAudioThreadFinished);
-    if (m_isAudioThreadFinished)
-        return;
-
-    if (m_destinationNode) {
-        m_destinationNode->initialize();
-
-        if (!isOfflineContext()) {
-            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
-            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
-            // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
-            // We may want to consider requiring it for symmetry with OfflineAudioContext.
-            startRendering();
-            ++s_hardwareContextCount;
-        }
-    }
-    m_isInitialized = true;
-}
-
-void AudioContext::clear()
+    : BaseAudioContext(document, renderTarget)
 {
-    Ref<AudioContext> protectedThis(*this);
-
-    // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
-    if (m_destinationNode)
-        m_destinationNode = nullptr;
-
-    // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
-    do {
-        deleteMarkedNodes();
-        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
-        m_nodesMarkedForDeletion.clear();
-    } while (m_nodesToDelete.size());
-
-    clearPendingActivity();
 }
 
-void AudioContext::uninitialize()
+void AudioContext::close(DOMPromiseDeferred<void>&& promise)
 {
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-
-    if (!m_isInitialized)
+    if (isOfflineContext() || isStopped()) {
+        promise.reject(InvalidStateError);
         return;
-
-    // This stops the audio thread and all audio rendering.
-    if (m_destinationNode)
-        m_destinationNode->uninitialize();
-
-    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
-    m_isAudioThreadFinished = true;
-
-    if (!isOfflineContext()) {
-        ASSERT(s_hardwareContextCount);
-        --s_hardwareContextCount;
-
-        // Offline contexts move to 'Closed' state when dispatching the completion event.
-        setState(State::Closed);
     }
 
-    // Get rid of the sources which may still be playing.
-    derefUnfinishedSourceNodes();
-
-    m_isInitialized = false;
-}
-
-bool AudioContext::isInitialized() const
-{
-    return m_isInitialized;
-}
-
-void AudioContext::addReaction(State state, DOMPromiseDeferred<void>&& promise)
-{
-    size_t stateIndex = static_cast<size_t>(state);
-    if (stateIndex >= m_stateReactions.size())
-        m_stateReactions.grow(stateIndex + 1);
-
-    m_stateReactions[stateIndex].append(WTFMove(promise));
-}
-
-void AudioContext::setState(State state)
-{
-    if (m_state == state)
-        return;
-
-    m_state = state;
-    m_eventQueue->enqueueEvent(Event::create(eventNames().statechangeEvent, Event::CanBubble::Yes, Event::IsCancelable::No));
-
-    size_t stateIndex = static_cast<size_t>(state);
-    if (stateIndex >= m_stateReactions.size())
-        return;
-
-    Vector<DOMPromiseDeferred<void>> reactions;
-    m_stateReactions[stateIndex].swap(reactions);
-
-    for (auto& promise : reactions)
+    if (state() == State::Closed || !destinationNode()) {
         promise.resolve();
-}
-
-void AudioContext::stop()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-
-    // Usually ScriptExecutionContext calls stop twice.
-    if (m_isStopScheduled)
-        return;
-    m_isStopScheduled = true;
-
-    ASSERT(document());
-    document()->updateIsPlayingMedia();
-
-    uninitialize();
-    clear();
-}
-
-void AudioContext::suspend(ReasonForSuspension)
-{
-    if (state() == State::Running) {
-        m_mediaSession->beginInterruption(PlatformMediaSession::PlaybackSuspended);
-        document()->updateIsPlayingMedia();
-    }
-}
-
-void AudioContext::resume()
-{
-    if (state() == State::Interrupted) {
-        m_mediaSession->endInterruption(PlatformMediaSession::MayResumePlaying);
-        document()->updateIsPlayingMedia();
-    }
-}
-
-const char* AudioContext::activeDOMObjectName() const
-{
-    return "AudioContext";
-}
-
-Document* AudioContextBase::document() const
-{
-    return downcast<Document>(m_scriptExecutionContext);
-}
-
-DocumentIdentifier AudioContext::hostingDocumentIdentifier() const
-{
-    auto* document = downcast<Document>(m_scriptExecutionContext);
-    return document ? document->identifier() : DocumentIdentifier { };
-}
-
-bool AudioContext::isSuspended() const
-{
-    return !document() || document()->activeDOMObjectsAreSuspended() || document()->activeDOMObjectsAreStopped();
-}
-
-void AudioContext::visibilityStateChanged()
-{
-    // Do not suspend if audio is audible.
-    if (!document() || mediaState() == MediaProducer::IsPlayingAudio || m_isStopScheduled)
         return;
-
-    if (document()->hidden()) {
-        if (state() == State::Running) {
-            RELEASE_LOG_IF_ALLOWED("visibilityStateChanged() Suspending playback after going to the background");
-            m_mediaSession->beginInterruption(PlatformMediaSession::EnteringBackground);
-        }
-    } else {
-        if (state() == State::Interrupted) {
-            RELEASE_LOG_IF_ALLOWED("visibilityStateChanged() Resuming playback after entering foreground");
-            m_mediaSession->endInterruption(PlatformMediaSession::MayResumePlaying);
-        }
     }
-}
-
-bool AudioContext::wouldTaintOrigin(const URL& url) const
-{
-    if (url.protocolIsData())
-        return false;
-
-    if (auto* document = this->document())
-        return !document->securityOrigin().canRequest(url);
-
-    return false;
-}
-
-ExceptionOr<Ref<AudioBuffer>> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
-{
-    auto audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
-    if (!audioBuffer)
-        return Exception { NotSupportedError };
-    return audioBuffer.releaseNonNull();
-}
-
-ExceptionOr<Ref<AudioBuffer>> AudioContext::createBuffer(ArrayBuffer& arrayBuffer, bool mixToMono)
-{
-    auto audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer.data(), arrayBuffer.byteLength(), mixToMono, sampleRate());
-    if (!audioBuffer)
-        return Exception { SyntaxError };
-    return audioBuffer.releaseNonNull();
-}
-
-void AudioContext::decodeAudioData(Ref<ArrayBuffer>&& audioData, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback)
-{
-    if (!m_audioDecoder)
-        m_audioDecoder = makeUnique<AsyncAudioDecoder>();
-    m_audioDecoder->decodeAsync(WTFMove(audioData), sampleRate(), WTFMove(successCallback), WTFMove(errorCallback));
-}
 
-ExceptionOr<Ref<AudioBufferSourceNode>> AudioContext::createBufferSource()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    ASSERT(isMainThread());
-
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
+    addReaction(State::Closed, WTFMove(promise));
 
     lazyInitialize();
-    Ref<AudioBufferSourceNode> node = AudioBufferSourceNode::create(*this, sampleRate());
-
-    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
-    // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
-    refNode(node);
 
-    return node;
+    destinationNode()->close([this, protectedThis = makeRef(*this)] {
+        setState(State::Closed);
+        uninitialize();
+    });
 }
 
 #if ENABLE(VIDEO)
@@ -468,7 +106,7 @@ ExceptionOr<Ref<MediaElementAudioSourceNode>> AudioContext::createMediaElementSo
 
     ASSERT(isMainThread());
 
-    if (m_isStopScheduled || mediaElement.audioSourceNode())
+    if (isStopped() || mediaElement.audioSourceNode())
         return Exception { InvalidStateError };
 
     lazyInitialize();
@@ -488,10 +126,10 @@ ExceptionOr<Ref<MediaElementAudioSourceNode>> AudioContext::createMediaElementSo
 ExceptionOr<Ref<MediaStreamAudioSourceNode>> AudioContext::createMediaStreamSource(MediaStream& mediaStream)
 {
     ALWAYS_LOG(LOGIDENTIFIER);
-    
+
     ASSERT(isMainThread());
 
-    if (m_isStopScheduled)
+    if (isStopped())
         return Exception { InvalidStateError };
 
     auto audioTracks = mediaStream.getAudioTracks();
@@ -519,7 +157,7 @@ ExceptionOr<Ref<MediaStreamAudioSourceNode>> AudioContext::createMediaStreamSour
 
 ExceptionOr<Ref<MediaStreamAudioDestinationNode>> AudioContext::createMediaStreamDestination()
 {
-    if (m_isStopScheduled)
+    if (isStopped())
         return Exception { InvalidStateError };
 
     // FIXME: Add support for an optional argument which specifies the number of channels.
@@ -529,890 +167,6 @@ ExceptionOr<Ref<MediaStreamAudioDestinationNode>> AudioContext::createMediaStrea
 
 #endif
 
-ExceptionOr<Ref<ScriptProcessorNode>> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels)
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-
-    // W3C Editor's Draft 06 June 2017
-    //  https://webaudio.github.io/web-audio-api/#widl-BaseAudioContext-createScriptProcessor-ScriptProcessorNode-unsigned-long-bufferSize-unsigned-long-numberOfInputChannels-unsigned-long-numberOfOutputChannels
-
-    // The bufferSize parameter determines the buffer size in units of sample-frames. If it's not passed in,
-    // or if the value is 0, then the implementation will choose the best buffer size for the given environment,
-    // which will be constant power of 2 throughout the lifetime of the node. ... If the value of this parameter
-    // is not one of the allowed power-of-2 values listed above, an IndexSizeError must be thrown.
-    switch (bufferSize) {
-    case 0:
-#if USE(AUDIO_SESSION)
-        // Pick a value between 256 (2^8) and 16384 (2^14), based on the buffer size of the current AudioSession:
-        bufferSize = 1 << std::max<size_t>(8, std::min<size_t>(14, std::log2(AudioSession::sharedSession().bufferSize())));
-#else
-        bufferSize = 2048;
-#endif
-        break;
-    case 256:
-    case 512:
-    case 1024:
-    case 2048:
-    case 4096:
-    case 8192:
-    case 16384:
-        break;
-    default:
-        return Exception { IndexSizeError };
-    }
-
-    // An IndexSizeError exception must be thrown if bufferSize or numberOfInputChannels or numberOfOutputChannels
-    // are outside the valid range. It is invalid for both numberOfInputChannels and numberOfOutputChannels to be zero.
-    // In this case an IndexSizeError must be thrown.
-
-    if (!numberOfInputChannels && !numberOfOutputChannels)
-        return Exception { NotSupportedError };
-
-    // This parameter [numberOfInputChannels] determines the number of channels for this node's input. Values of
-    // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.
-
-    if (numberOfInputChannels > maxNumberOfChannels())
-        return Exception { NotSupportedError };
-
-    // This parameter [numberOfOutputChannels] determines the number of channels for this node's output. Values of
-    // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.
-
-    if (numberOfOutputChannels > maxNumberOfChannels())
-        return Exception { NotSupportedError };
-
-    auto node = ScriptProcessorNode::create(*this, sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
-
-    refNode(node); // context keeps reference until we stop making javascript rendering callbacks
-    return node;
-}
-
-ExceptionOr<Ref<BiquadFilterNode>> AudioContext::createBiquadFilter()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-
-    return BiquadFilterNode::create(*this, sampleRate());
-}
-
-ExceptionOr<Ref<WaveShaperNode>> AudioContext::createWaveShaper()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    return WaveShaperNode::create(*this);
-}
-
-ExceptionOr<Ref<PannerNode>> AudioContext::createPanner()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    return PannerNode::create(*this, sampleRate());
-}
-
-ExceptionOr<Ref<ConvolverNode>> AudioContext::createConvolver()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    return ConvolverNode::create(*this, sampleRate());
-}
-
-ExceptionOr<Ref<DynamicsCompressorNode>> AudioContext::createDynamicsCompressor()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    return DynamicsCompressorNode::create(*this, sampleRate());
-}
-
-ExceptionOr<Ref<AnalyserNode>> AudioContext::createAnalyser()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    return AnalyserNode::create(*this, sampleRate());
-}
-
-ExceptionOr<Ref<GainNode>> AudioContext::createGain()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    return GainNode::create(*this, sampleRate());
-}
-
-ExceptionOr<Ref<DelayNode>> AudioContext::createDelay(double maxDelayTime)
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    return DelayNode::create(*this, sampleRate(), maxDelayTime);
-}
-
-ExceptionOr<Ref<ChannelSplitterNode>> AudioContext::createChannelSplitter(size_t numberOfOutputs)
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    auto node = ChannelSplitterNode::create(*this, sampleRate(), numberOfOutputs);
-    if (!node)
-        return Exception { IndexSizeError };
-    return node.releaseNonNull();
-}
-
-ExceptionOr<Ref<ChannelMergerNode>> AudioContext::createChannelMerger(size_t numberOfInputs)
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-    auto node = ChannelMergerNode::create(*this, sampleRate(), numberOfInputs);
-    if (!node)
-        return Exception { IndexSizeError };
-    return node.releaseNonNull();
-}
-
-ExceptionOr<Ref<OscillatorNode>> AudioContext::createOscillator()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    lazyInitialize();
-
-    Ref<OscillatorNode> node = OscillatorNode::create(*this, sampleRate());
-
-    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
-    // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
-    refNode(node);
-
-    return node;
-}
-
-ExceptionOr<Ref<PeriodicWave>> AudioContext::createPeriodicWave(Float32Array& real, Float32Array& imaginary)
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    
-    ASSERT(isMainThread());
-    if (m_isStopScheduled)
-        return Exception { InvalidStateError };
-
-    if (real.length() != imaginary.length() || (real.length() > MaxPeriodicWaveLength) || !real.length())
-        return Exception { IndexSizeError };
-    lazyInitialize();
-    return PeriodicWave::create(sampleRate(), real, imaginary);
-}
-
-void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
-{
-    ASSERT(isAudioThread());
-    m_finishedNodes.append(node);
-}
-
-void AudioContext::derefFinishedSourceNodes()
-{
-    ASSERT(isGraphOwner());
-    ASSERT(isAudioThread() || isAudioThreadFinished());
-    for (auto& node : m_finishedNodes)
-        derefNode(*node);
-
-    m_finishedNodes.clear();
-}
-
-void AudioContext::refNode(AudioNode& node)
-{
-    ASSERT(isMainThread());
-    AutoLocker locker(*this);
-    
-    node.ref(AudioNode::RefTypeConnection);
-    m_referencedNodes.append(&node);
-}
-
-void AudioContext::derefNode(AudioNode& node)
-{
-    ASSERT(isGraphOwner());
-    
-    node.deref(AudioNode::RefTypeConnection);
-
-    ASSERT(m_referencedNodes.contains(&node));
-    m_referencedNodes.removeFirst(&node);
-}
-
-void AudioContext::derefUnfinishedSourceNodes()
-{
-    ASSERT(isMainThread() && isAudioThreadFinished());
-    for (auto& node : m_referencedNodes)
-        node->deref(AudioNode::RefTypeConnection);
-
-    m_referencedNodes.clear();
-}
-
-void AudioContext::lock(bool& mustReleaseLock)
-{
-    // Don't allow regular lock in real-time audio thread.
-    ASSERT(isMainThread());
-
-    Thread& thisThread = Thread::current();
-
-    if (&thisThread == m_graphOwnerThread) {
-        // We already have the lock.
-        mustReleaseLock = false;
-    } else {
-        // Acquire the lock.
-        m_contextGraphMutex.lock();
-        m_graphOwnerThread = &thisThread;
-        mustReleaseLock = true;
-    }
-}
-
-bool AudioContext::tryLock(bool& mustReleaseLock)
-{
-    Thread& thisThread = Thread::current();
-    bool isAudioThread = &thisThread == audioThread();
-
-    // Try to catch cases of using try lock on main thread - it should use regular lock.
-    ASSERT(isAudioThread || isAudioThreadFinished());
-    
-    if (!isAudioThread) {
-        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
-        lock(mustReleaseLock);
-        return true;
-    }
-    
-    bool hasLock;
-    
-    if (&thisThread == m_graphOwnerThread) {
-        // Thread already has the lock.
-        hasLock = true;
-        mustReleaseLock = false;
-    } else {
-        // Don't already have the lock - try to acquire it.
-        hasLock = m_contextGraphMutex.tryLock();
-        
-        if (hasLock)
-            m_graphOwnerThread = &thisThread;
-
-        mustReleaseLock = hasLock;
-    }
-    
-    return hasLock;
-}
-
-void AudioContext::unlock()
-{
-    ASSERT(m_graphOwnerThread == &Thread::current());
-
-    m_graphOwnerThread = nullptr;
-    m_contextGraphMutex.unlock();
-}
-
-bool AudioContext::isAudioThread() const
-{
-    return m_audioThread == &Thread::current();
-}
-
-bool AudioContext::isGraphOwner() const
-{
-    return m_graphOwnerThread == &Thread::current();
-}
-
-void AudioContext::addDeferredFinishDeref(AudioNode* node)
-{
-    ASSERT(isAudioThread());
-    m_deferredFinishDerefList.append(node);
-}
-
-void AudioContext::handlePreRenderTasks()
-{
-    ASSERT(isAudioThread());
-
-    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
-    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
-    bool mustReleaseLock;
-    if (tryLock(mustReleaseLock)) {
-        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
-        handleDirtyAudioSummingJunctions();
-        handleDirtyAudioNodeOutputs();
-
-        updateAutomaticPullNodes();
-
-        if (mustReleaseLock)
-            unlock();
-    }
-}
-
-void AudioContext::handlePostRenderTasks()
-{
-    ASSERT(isAudioThread());
-
-    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
-    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
-    // from the render graph (in which case they'll render silence).
-    bool mustReleaseLock;
-    if (tryLock(mustReleaseLock)) {
-        // Take care of finishing any derefs where the tryLock() failed previously.
-        handleDeferredFinishDerefs();
-
-        // Dynamically clean up nodes which are no longer needed.
-        derefFinishedSourceNodes();
-
-        // Don't delete in the real-time thread. Let the main thread do it.
-        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
-        scheduleNodeDeletion();
-
-        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
-        handleDirtyAudioSummingJunctions();
-        handleDirtyAudioNodeOutputs();
-
-        updateAutomaticPullNodes();
-
-        if (mustReleaseLock)
-            unlock();
-    }
-}
-
-void AudioContext::handleDeferredFinishDerefs()
-{
-    ASSERT(isAudioThread() && isGraphOwner());
-    for (auto& node : m_deferredFinishDerefList)
-        node->finishDeref(AudioNode::RefTypeConnection);
-    
-    m_deferredFinishDerefList.clear();
-}
-
-void AudioContext::markForDeletion(AudioNode& node)
-{
-    ASSERT(isGraphOwner());
-
-    if (isAudioThreadFinished())
-        m_nodesToDelete.append(&node);
-    else
-        m_nodesMarkedForDeletion.append(&node);
-
-    // This is probably the best time for us to remove the node from automatic pull list,
-    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
-    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
-    // modify m_renderingAutomaticPullNodes.
-    removeAutomaticPullNode(node);
-}
-
-void AudioContext::scheduleNodeDeletion()
-{
-    bool isGood = m_isInitialized && isGraphOwner();
-    ASSERT(isGood);
-    if (!isGood)
-        return;
-
-    // Make sure to call deleteMarkedNodes() on main thread.    
-    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
-        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
-        m_nodesMarkedForDeletion.clear();
-
-        m_isDeletionScheduled = true;
-
-        callOnMainThread([protectedThis = makeRef(*this)]() mutable {
-            protectedThis->deleteMarkedNodes();
-        });
-    }
-}
-
-void AudioContext::deleteMarkedNodes()
-{
-    ASSERT(isMainThread());
-
-    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
-    Ref<AudioContext> protectedThis(*this);
-    {
-        AutoLocker locker(*this);
-
-        while (m_nodesToDelete.size()) {
-            AudioNode* node = m_nodesToDelete.takeLast();
-
-            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
-            unsigned numberOfInputs = node->numberOfInputs();
-            for (unsigned i = 0; i < numberOfInputs; ++i)
-                m_dirtySummingJunctions.remove(node->input(i));
-
-            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
-            unsigned numberOfOutputs = node->numberOfOutputs();
-            for (unsigned i = 0; i < numberOfOutputs; ++i)
-                m_dirtyAudioNodeOutputs.remove(node->output(i));
-
-            // Finally, delete it.
-            delete node;
-        }
-        m_isDeletionScheduled = false;
-    }
-}
-
-void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
-{
-    ASSERT(isGraphOwner());    
-    m_dirtySummingJunctions.add(summingJunction);
-}
-
-void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
-{
-    ASSERT(isMainThread());
-    AutoLocker locker(*this);
-    m_dirtySummingJunctions.remove(summingJunction);
-}
-
-EventTargetInterface AudioContext::eventTargetInterface() const
-{
-    return AudioContextEventTargetInterfaceType;
-}
-
-void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
-{
-    ASSERT(isGraphOwner());
-    m_dirtyAudioNodeOutputs.add(output);
-}
-
-void AudioContext::handleDirtyAudioSummingJunctions()
-{
-    ASSERT(isGraphOwner());    
-
-    for (auto& junction : m_dirtySummingJunctions)
-        junction->updateRenderingState();
-
-    m_dirtySummingJunctions.clear();
-}
-
-void AudioContext::handleDirtyAudioNodeOutputs()
-{
-    ASSERT(isGraphOwner());    
-
-    for (auto& output : m_dirtyAudioNodeOutputs)
-        output->updateRenderingState();
-
-    m_dirtyAudioNodeOutputs.clear();
-}
-
-void AudioContext::addAutomaticPullNode(AudioNode& node)
-{
-    ASSERT(isGraphOwner());
-
-    if (m_automaticPullNodes.add(&node).isNewEntry)
-        m_automaticPullNodesNeedUpdating = true;
-}
-
-void AudioContext::removeAutomaticPullNode(AudioNode& node)
-{
-    ASSERT(isGraphOwner());
-
-    if (m_automaticPullNodes.remove(&node))
-        m_automaticPullNodesNeedUpdating = true;
-}
-
-void AudioContext::updateAutomaticPullNodes()
-{
-    ASSERT(isGraphOwner());
-
-    if (m_automaticPullNodesNeedUpdating) {
-        // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
-        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
-
-        unsigned i = 0;
-        for (auto& output : m_automaticPullNodes)
-            m_renderingAutomaticPullNodes[i++] = output;
-
-        m_automaticPullNodesNeedUpdating = false;
-    }
-}
-
-void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
-{
-    ASSERT(isAudioThread());
-
-    for (auto& node : m_renderingAutomaticPullNodes)
-        node->processIfNecessary(framesToProcess);
-}
-
-ScriptExecutionContext* AudioContextBase::scriptExecutionContext() const
-{
-    return ActiveDOMObject::scriptExecutionContext();
-}
-
-void AudioContext::nodeWillBeginPlayback()
-{
-    // Called by scheduled AudioNodes when clients schedule their start times.
-    // Prior to the introduction of suspend(), resume(), and stop(), starting
-    // a scheduled AudioNode would remove the user-gesture restriction, if present,
-    // and would thus unmute the context. Now that AudioContext stays in the
-    // "suspended" state if a user-gesture restriction is present, starting a
-    // schedule AudioNode should set the state to "running", but only if the
-    // user-gesture restriction is set.
-    if (userGestureRequiredForAudioStart())
-        startRendering();
-}
-
-static bool shouldDocumentAllowWebAudioToAutoPlay(const Document& document)
-{
-    if (document.processingUserGestureForMedia() || document.isCapturing())
-        return true;
-    return document.quirks().shouldAutoplayWebAudioForArbitraryUserGesture() && document.topDocument().hasHadUserInteraction();
-}
-
-bool AudioContext::willBeginPlayback()
-{
-    auto* document = this->document();
-    if (!document)
-        return false;
-
-    if (userGestureRequiredForAudioStart()) {
-        if (!shouldDocumentAllowWebAudioToAutoPlay(*document)) {
-            ALWAYS_LOG(LOGIDENTIFIER, "returning false, not processing user gesture or capturing");
-            return false;
-        }
-        removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
-    }
-
-    if (pageConsentRequiredForAudioStart()) {
-        auto* page = document->page();
-        if (page && !page->canStartMedia()) {
-            document->addMediaCanStartListener(*this);
-            ALWAYS_LOG(LOGIDENTIFIER, "returning false, page doesn't allow media to start");
-            return false;
-        }
-        removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
-    }
-    
-    auto willBegin = m_mediaSession->clientWillBeginPlayback();
-    ALWAYS_LOG(LOGIDENTIFIER, "returning ", willBegin);
-    
-    return willBegin;
-}
-
-bool AudioContext::willPausePlayback()
-{
-    auto* document = this->document();
-    if (!document)
-        return false;
-
-    if (userGestureRequiredForAudioStart()) {
-        if (!document->processingUserGestureForMedia())
-            return false;
-        removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
-    }
-
-    if (pageConsentRequiredForAudioStart()) {
-        auto* page = document->page();
-        if (page && !page->canStartMedia()) {
-            document->addMediaCanStartListener(*this);
-            return false;
-        }
-        removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
-    }
-    
-    return m_mediaSession->clientWillPausePlayback();
-}
-
-void AudioContext::startRendering()
-{
-    ALWAYS_LOG(LOGIDENTIFIER);
-    if (m_isStopScheduled || !willBeginPlayback())
-        return;
-
-    makePendingActivity();
-
-    destination()->startRendering();
-    setState(State::Running);
-}
-
-void AudioContext::mediaCanStart(Document& document)
-{
-    ASSERT_UNUSED(document, &document == this->document());
-    removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
-    mayResumePlayback(true);
-}
-
-MediaProducer::MediaStateFlags AudioContext::mediaState() const
-{
-    if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
-        return MediaProducer::IsPlayingAudio;
-
-    return MediaProducer::IsNotPlaying;
-}
-
-void AudioContext::pageMutedStateDidChange()
-{
-    if (m_destinationNode && document() && document()->page())
-        m_destinationNode->setMuted(document()->page()->isAudioMuted());
-}
-
-void AudioContext::isPlayingAudioDidChange()
-{
-    // Make sure to call Document::updateIsPlayingMedia() on the main thread, since
-    // we could be on the audio I/O thread here and the call into WebCore could block.
-    callOnMainThread([protectedThis = makeRef(*this)] {
-        if (protectedThis->document())
-            protectedThis->document()->updateIsPlayingMedia();
-    });
-}
-
-void AudioContext::finishedRendering(bool didRendering)
-{
-    ASSERT(isOfflineContext());
-    ASSERT(isMainThread());
-    if (!isMainThread())
-        return;
-
-    auto clearPendingActivityIfExitEarly = WTF::makeScopeExit([this] {
-        clearPendingActivity();
-    });
-
-
-    ALWAYS_LOG(LOGIDENTIFIER);
-
-    if (!didRendering)
-        return;
-
-    AudioBuffer* renderedBuffer = m_renderTarget.get();
-    setState(State::Closed);
-
-    ASSERT(renderedBuffer);
-    if (!renderedBuffer)
-        return;
-
-    // Avoid firing the event if the document has already gone away.
-    if (m_isStopScheduled)
-        return;
-
-    clearPendingActivityIfExitEarly.release();
-    m_eventQueue->enqueueEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
-}
-
-void AudioContext::dispatchEvent(Event& event)
-{
-    EventTarget::dispatchEvent(event);
-    if (event.eventInterface() == OfflineAudioCompletionEventInterfaceType)
-        clearPendingActivity();
-}
-
-void AudioContext::incrementActiveSourceCount()
-{
-    ++m_activeSourceCount;
-}
-
-void AudioContext::decrementActiveSourceCount()
-{
-    --m_activeSourceCount;
-}
-
-void AudioContext::suspendRendering(DOMPromiseDeferred<void>&& promise)
-{
-    if (isOfflineContext() || m_isStopScheduled) {
-        promise.reject(InvalidStateError);
-        return;
-    }
-
-    if (m_state == State::Suspended) {
-        promise.resolve();
-        return;
-    }
-
-    if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
-        promise.reject();
-        return;
-    }
-
-    addReaction(State::Suspended, WTFMove(promise));
-
-    if (!willPausePlayback())
-        return;
-
-    lazyInitialize();
-
-    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
-        setState(State::Suspended);
-    });
-}
-
-void AudioContext::resumeRendering(DOMPromiseDeferred<void>&& promise)
-{
-    if (isOfflineContext() || m_isStopScheduled) {
-        promise.reject(InvalidStateError);
-        return;
-    }
-
-    if (m_state == State::Running) {
-        promise.resolve();
-        return;
-    }
-
-    if (m_state == State::Closed || !m_destinationNode) {
-        promise.reject();
-        return;
-    }
-
-    addReaction(State::Running, WTFMove(promise));
-
-    if (!willBeginPlayback())
-        return;
-
-    lazyInitialize();
-
-    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
-        setState(State::Running);
-    });
-}
-
-void AudioContext::close(DOMPromiseDeferred<void>&& promise)
-{
-    if (isOfflineContext() || m_isStopScheduled) {
-        promise.reject(InvalidStateError);
-        return;
-    }
-
-    if (m_state == State::Closed || !m_destinationNode) {
-        promise.resolve();
-        return;
-    }
-
-    addReaction(State::Closed, WTFMove(promise));
-
-    lazyInitialize();
-
-    m_destinationNode->close([this, protectedThis = makeRef(*this)] {
-        setState(State::Closed);
-        uninitialize();
-    });
-}
-
-
-void AudioContext::suspendPlayback()
-{
-    if (!m_destinationNode || m_state == State::Closed)
-        return;
-
-    if (m_state == State::Suspended) {
-        if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
-            setState(State::Interrupted);
-        return;
-    }
-
-    lazyInitialize();
-
-    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
-        bool interrupted = m_mediaSession->state() == PlatformMediaSession::Interrupted;
-        setState(interrupted ? State::Interrupted : State::Suspended);
-    });
-}
-
-void AudioContext::mayResumePlayback(bool shouldResume)
-{
-    if (!m_destinationNode || m_state == State::Closed || m_state == State::Running)
-        return;
-
-    if (!shouldResume) {
-        setState(State::Suspended);
-        return;
-    }
-
-    if (!willBeginPlayback())
-        return;
-
-    lazyInitialize();
-
-    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
-        setState(State::Running);
-    });
-}
-
-void AudioContext::postTask(WTF::Function<void()>&& task)
-{
-    if (m_isStopScheduled)
-        return;
-
-    m_scriptExecutionContext->postTask(WTFMove(task));
-}
-
-const SecurityOrigin* AudioContext::origin() const
-{
-    return m_scriptExecutionContext ? m_scriptExecutionContext->securityOrigin() : nullptr;
-}
-
-void AudioContext::addConsoleMessage(MessageSource source, MessageLevel level, const String& message)
-{
-    if (m_scriptExecutionContext)
-        m_scriptExecutionContext->addConsoleMessage(source, level, message);
-}
-
-void AudioContext::clearPendingActivity()
-{
-    if (!m_pendingActivity)
-        return;
-    m_pendingActivity = nullptr;
-    // FIXME: Remove this specific deref() and ref() call in makePendingActivity().
-    deref();
-}
-
-void AudioContext::makePendingActivity()
-{
-    if (m_pendingActivity)
-        return;
-    m_pendingActivity = ActiveDOMObject::makePendingActivity(*this);
-    ref();
-}
-
-#if !RELEASE_LOG_DISABLED
-WTFLogChannel& AudioContext::logChannel() const
-{
-    return LogMedia;
-}
-#endif
-
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
index 505921f..bfa8040 100644 (file)
 
 #pragma once
 
-#include "ActiveDOMObject.h"
-#include "AsyncAudioDecoder.h"
-#include "AudioBus.h"
-#include "AudioContextState.h"
-#include "AudioDestinationNode.h"
-#include "EventTarget.h"
-#include "MediaCanStartListener.h"
-#include "MediaProducer.h"
-#include "PlatformMediaSession.h"
-#include "ScriptExecutionContext.h"
-#include "VisibilityChangeClient.h"
-#include <JavaScriptCore/ConsoleTypes.h>
-#include <JavaScriptCore/Float32Array.h>
-#include <atomic>
-#include <wtf/HashSet.h>
-#include <wtf/LoggerHelper.h>
-#include <wtf/MainThread.h>
-#include <wtf/RefPtr.h>
-#include <wtf/ThreadSafeRefCounted.h>
-#include <wtf/Threading.h>
-#include <wtf/UniqueRef.h>
-#include <wtf/Vector.h>
-#include <wtf/text/AtomStringHash.h>
+#include "BaseAudioContext.h"
 
 namespace WebCore {
 
-class AnalyserNode;
-class AudioBuffer;
-class AudioBufferCallback;
-class AudioBufferSourceNode;
-class AudioListener;
-class AudioSummingJunction;
-class BiquadFilterNode;
-class ChannelMergerNode;
-class ChannelSplitterNode;
-class ConvolverNode;
-class DelayNode;
-class Document;
-class DynamicsCompressorNode;
-class GainNode;
-class HTMLMediaElement;
-class MainThreadGenericEventQueue;
-class MediaElementAudioSourceNode;
-class MediaStream;
-class MediaStreamAudioDestinationNode;
-class MediaStreamAudioSourceNode;
-class OscillatorNode;
-class PannerNode;
-class PeriodicWave;
-class ScriptProcessorNode;
-class SecurityOrigin;
-class WaveShaperNode;
-
-template<typename IDLType> class DOMPromiseDeferred;
-
-class AudioContextBase
-    : public ActiveDOMObject
-    , public ThreadSafeRefCounted<AudioContext>
-    , public EventTargetWithInlineData
-    , public MediaCanStartListener
-    , public MediaProducer
-#if !RELEASE_LOG_DISABLED
-    , public LoggerHelper
-#endif
-{
-    WTF_MAKE_ISO_ALLOCATED(AudioContextBase);
-public:
-    virtual ~AudioContextBase() = default;
-
-    // Reconcile ref/deref which are defined both in ThreadSafeRefCounted and EventTarget.
-    using ThreadSafeRefCounted::ref;
-    using ThreadSafeRefCounted::deref;
-
-    Document* document() const;
-
-    virtual bool isInitialized() const = 0;
-
-    virtual size_t currentSampleFrame() const = 0;
-    virtual float sampleRate() const = 0;
-    virtual double currentTime() const = 0;
-    virtual bool isGraphOwner() const = 0;
-
-    virtual void setAudioThread(Thread&) = 0;
-    virtual bool isAudioThread() const = 0;
-    virtual bool isAudioThreadFinished() = 0;
-
-    virtual void isPlayingAudioDidChange() = 0;
-    virtual void nodeWillBeginPlayback() = 0;
-
-    virtual void postTask(WTF::Function<void()>&&) = 0;
-    virtual bool isStopped() const = 0;
-    virtual const SecurityOrigin* origin() const = 0;
-    virtual void addConsoleMessage(MessageSource, MessageLevel, const String& message) = 0;
-
-    virtual void markForDeletion(AudioNode&) = 0;
-    virtual void deleteMarkedNodes() = 0;
-
-    virtual void handlePreRenderTasks() = 0;
-    virtual void handlePostRenderTasks() = 0;
-    virtual void processAutomaticPullNodes(size_t framesToProcess) = 0;
-    virtual void addDeferredFinishDeref(AudioNode*) = 0;
-
-    virtual void removeMarkedSummingJunction(AudioSummingJunction*) = 0;
-    virtual void markSummingJunctionDirty(AudioSummingJunction*) = 0;
-    virtual void markAudioNodeOutputDirty(AudioNodeOutput*) = 0;
-
-    enum BehaviorRestrictionFlags {
-        NoRestrictions = 0,
-        RequireUserGestureForAudioStartRestriction = 1 << 0,
-        RequirePageConsentForAudioStartRestriction = 1 << 1,
-    };
-    typedef unsigned BehaviorRestrictions;
-    virtual BehaviorRestrictions behaviorRestrictions() const = 0;
-    virtual void addBehaviorRestriction(BehaviorRestrictions) = 0;
-    virtual void removeBehaviorRestriction(BehaviorRestrictions) = 0;
-
-#if !RELEASE_LOG_DISABLED
-    virtual const void* nextAudioNodeLogIdentifier() = 0;
-    virtual const void* nextAudioParameterLogIdentifier() = 0;
-#endif
-
-    virtual void addAutomaticPullNode(AudioNode&) = 0;
-    virtual void removeAutomaticPullNode(AudioNode&) = 0;
-
-    virtual void notifyNodeFinishedProcessing(AudioNode*) = 0;
-
-    virtual void finishedRendering(bool didRendering) = 0;
-
-    virtual void incrementConnectionCount() = 0;
-    virtual void incrementActiveSourceCount() = 0;
-    virtual void decrementActiveSourceCount() = 0;
-
-    virtual bool isOfflineContext() const = 0;
-    virtual bool isAudioContext() const = 0;
-    virtual bool isWebKitAudioContext() const = 0;
-
-    // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
-    virtual void lock(bool& mustReleaseLock) = 0;
-    virtual bool tryLock(bool& mustReleaseLock) = 0;
-    virtual void unlock() = 0;
-
-    class AutoLocker {
-    public:
-        explicit AutoLocker(AudioContextBase& context)
-            : m_context(context)
-        {
-            m_context.lock(m_mustReleaseLock);
-        }
-
-        ~AutoLocker()
-        {
-            if (m_mustReleaseLock)
-                m_context.unlock();
-        }
-
-    private:
-        AudioContextBase& m_context;
-        bool m_mustReleaseLock;
-    };
-
-    // EventTarget
-    ScriptExecutionContext* scriptExecutionContext() const final;
-    void refEventTarget() override { ref(); }
-    void derefEventTarget() override { deref(); }
-
-protected:
-    explicit AudioContextBase(Document&);
-};
-
-// AudioContext is the cornerstone of the web audio API and all AudioNodes are created from it.
-// For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism. 
-
-class AudioContext
-    : public AudioContextBase
-    , private PlatformMediaSessionClient
-    , private VisibilityChangeClient
-{
+class AudioContext : public BaseAudioContext {
     WTF_MAKE_ISO_ALLOCATED(AudioContext);
 public:
     // Create an AudioContext for rendering to the audio hardware.
     static ExceptionOr<Ref<AudioContext>> create(Document&);
 
-    virtual ~AudioContext();
-
-    bool isInitialized() const final;
-    
-    bool isOfflineContext() const final { return m_isOfflineContext; }
-
-    DocumentIdentifier hostingDocumentIdentifier() const final;
-
-    AudioDestinationNode* destination() { return m_destinationNode.get(); }
-    size_t currentSampleFrame() const final { return m_destinationNode ? m_destinationNode->currentSampleFrame() : 0; }
-    double currentTime() const final { return m_destinationNode ? m_destinationNode->currentTime() : 0.; }
-    float sampleRate() const final { return m_destinationNode ? m_destinationNode->sampleRate() : 0.f; }
-    unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }
-
-    void incrementActiveSourceCount() final;
-    void decrementActiveSourceCount() final;
-    
-    ExceptionOr<Ref<AudioBuffer>> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
-    ExceptionOr<Ref<AudioBuffer>> createBuffer(ArrayBuffer&, bool mixToMono);
-
-    // Asynchronous audio file data decoding.
-    void decodeAudioData(Ref<ArrayBuffer>&&, RefPtr<AudioBufferCallback>&&, RefPtr<AudioBufferCallback>&&);
-
-    AudioListener* listener() { return m_listener.get(); }
-
-    void suspendRendering(DOMPromiseDeferred<void>&&);
-    void resumeRendering(DOMPromiseDeferred<void>&&);
     void close(DOMPromiseDeferred<void>&&);
 
-    using State = AudioContextState;
-    State state() const;
-    bool isClosed() const { return m_state == State::Closed; }
-
-    bool wouldTaintOrigin(const URL&) const;
-
-    // The AudioNode create methods are called on the main thread (from JavaScript).
-    ExceptionOr<Ref<AudioBufferSourceNode>> createBufferSource();
 #if ENABLE(VIDEO)
     ExceptionOr<Ref<MediaElementAudioSourceNode>> createMediaElementSource(HTMLMediaElement&);
 #endif
@@ -251,280 +44,10 @@ public:
     ExceptionOr<Ref<MediaStreamAudioSourceNode>> createMediaStreamSource(MediaStream&);
     ExceptionOr<Ref<MediaStreamAudioDestinationNode>> createMediaStreamDestination();
 #endif
-    ExceptionOr<Ref<GainNode>> createGain();
-    ExceptionOr<Ref<BiquadFilterNode>> createBiquadFilter();
-    ExceptionOr<Ref<WaveShaperNode>> createWaveShaper();
-    ExceptionOr<Ref<DelayNode>> createDelay(double maxDelayTime);
-    ExceptionOr<Ref<PannerNode>> createPanner();
-    ExceptionOr<Ref<ConvolverNode>> createConvolver();
-    ExceptionOr<Ref<DynamicsCompressorNode>> createDynamicsCompressor();
-    ExceptionOr<Ref<AnalyserNode>> createAnalyser();
-    ExceptionOr<Ref<ScriptProcessorNode>> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels);
-    ExceptionOr<Ref<ChannelSplitterNode>> createChannelSplitter(size_t numberOfOutputs);
-    ExceptionOr<Ref<ChannelMergerNode>> createChannelMerger(size_t numberOfInputs);
-    ExceptionOr<Ref<OscillatorNode>> createOscillator();
-    ExceptionOr<Ref<PeriodicWave>> createPeriodicWave(Float32Array& real, Float32Array& imaginary);
-
-    // When a source node has no more processing to do (has finished playing), then it tells the context to dereference it.
-    void notifyNodeFinishedProcessing(AudioNode*) final;
-
-    // Called at the start of each render quantum.
-    void handlePreRenderTasks() final;
-
-    // Called at the end of each render quantum.
-    void handlePostRenderTasks() final;
-
-    // Called periodically at the end of each render quantum to dereference finished source nodes.
-    void derefFinishedSourceNodes();
-
-    // We schedule deletion of all marked nodes at the end of each realtime render quantum.
-    void markForDeletion(AudioNode&) final;
-    void deleteMarkedNodes() final;
-
-    // AudioContext can pull node(s) at the end of each render quantum even when they are not connected to any downstream nodes.
-    // These two methods are called by the nodes who want to add/remove themselves into/from the automatic pull lists.
-    void addAutomaticPullNode(AudioNode&) final;
-    void removeAutomaticPullNode(AudioNode&) final;
-
-    // Called right before handlePostRenderTasks() to handle nodes which need to be pulled even when they are not connected to anything.
-    void processAutomaticPullNodes(size_t framesToProcess) final;
-
-    // Keeps track of the number of connections made.
-    void incrementConnectionCount() final
-    {
-        ASSERT(isMainThread());
-        m_connectionCount++;
-    }
-
-    unsigned connectionCount() const { return m_connectionCount; }
-
-    //
-    // Thread Safety and Graph Locking:
-    //
-    
-    void setAudioThread(Thread& thread) final { m_audioThread = &thread; } // FIXME: check either not initialized or the same
-    Thread* audioThread() const { return m_audioThread; }
-    bool isAudioThread() const final;
-
-    // Returns true only after the audio thread has been started and then shutdown.
-    bool isAudioThreadFinished() final { return m_isAudioThreadFinished; }
-    
-    // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
-    void lock(bool& mustReleaseLock) final;
-
-    // Returns true if we own the lock.
-    // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
-    bool tryLock(bool& mustReleaseLock) final;
-
-    void unlock() final;
-
-    // Returns true if this thread owns the context's lock.
-    bool isGraphOwner() const final;
-
-    // Returns the maximum number of channels we can support.
-    static unsigned maxNumberOfChannels() { return MaxNumberOfChannels; }
-    
-    // In AudioNode::deref() a tryLock() is used for calling finishDeref(), but if it fails keep track here.
-    void addDeferredFinishDeref(AudioNode*) final;
-
-    // In the audio thread at the start of each render cycle, we'll call handleDeferredFinishDerefs().
-    void handleDeferredFinishDerefs();
-
-    // Only accessed when the graph lock is held.
-    void markSummingJunctionDirty(AudioSummingJunction*) final;
-    void markAudioNodeOutputDirty(AudioNodeOutput*) final;
-
-    // Must be called on main thread.
-    void removeMarkedSummingJunction(AudioSummingJunction*) final;
-
-    // EventTarget
-    EventTargetInterface eventTargetInterface() const final;
-
-    void startRendering();
-    void finishedRendering(bool didRendering) final;
-
-    static unsigned s_hardwareContextCount;
-
-    // Restrictions to change default behaviors.
-    BehaviorRestrictions behaviorRestrictions() const final { return m_restrictions; }
-    void addBehaviorRestriction(BehaviorRestrictions restriction) final { m_restrictions |= restriction; }
-    void removeBehaviorRestriction(BehaviorRestrictions restriction) final { m_restrictions &= ~restriction; }
-
-    void isPlayingAudioDidChange() final;
-
-    void nodeWillBeginPlayback() final;
-
-#if !RELEASE_LOG_DISABLED
-    const Logger& logger() const final { return m_logger.get(); }
-    const void* logIdentifier() const final { return m_logIdentifier; }
-    WTFLogChannel& logChannel() const final;
-    const void* nextAudioNodeLogIdentifier() final { return childLogIdentifier(m_logIdentifier, ++m_nextAudioNodeIdentifier); }
-    const void* nextAudioParameterLogIdentifier() final { return childLogIdentifier(m_logIdentifier, ++m_nextAudioParameterIdentifier); }
-#endif
-
-    void postTask(WTF::Function<void()>&&) final;
-    bool isStopped() const final { return m_isStopScheduled; }
-    const SecurityOrigin* origin() const final;
-    void addConsoleMessage(MessageSource, MessageLevel, const String& message) final;
 
-protected:
+private:
     explicit AudioContext(Document&);
     AudioContext(Document&, AudioBuffer* renderTarget);
-    
-    static bool isSampleRateRangeGood(float sampleRate);
-    void clearPendingActivity();
-    void makePendingActivity();
-
-private:
-    void constructCommon();
-
-    void lazyInitialize();
-    void uninitialize();
-
-    bool willBeginPlayback();
-    bool willPausePlayback();
-
-    bool userGestureRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequireUserGestureForAudioStartRestriction; }
-    bool pageConsentRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequirePageConsentForAudioStartRestriction; }
-
-    void setState(State);
-
-    void clear();
-
-    void scheduleNodeDeletion();
-
-    void mediaCanStart(Document&) override;
-
-    // EventTarget
-    void dispatchEvent(Event&) final;
-
-    // MediaProducer
-    MediaProducer::MediaStateFlags mediaState() const override;
-    void pageMutedStateDidChange() override;
-
-    // The context itself keeps a reference to all source nodes.  The source nodes, then reference all nodes they're connected to.
-    // In turn, these nodes reference all nodes they're connected to.  All nodes are ultimately connected to the AudioDestinationNode.
-    // When the context dereferences a source node, it will be deactivated from the rendering graph along with all other nodes it is
-    // uniquely connected to.  See the AudioNode::ref() and AudioNode::deref() methods for more details.
-    void refNode(AudioNode&);
-    void derefNode(AudioNode&);
-
-    // ActiveDOMObject API.
-    void suspend(ReasonForSuspension) final;
-    void resume() final;
-    void stop() override;
-    const char* activeDOMObjectName() const override;
-
-    // When the context goes away, there might still be some sources which haven't finished playing.
-    // Make sure to dereference them here.
-    void derefUnfinishedSourceNodes();
-
-    // PlatformMediaSessionClient
-    PlatformMediaSession::MediaType mediaType() const override { return PlatformMediaSession::MediaType::WebAudio; }
-    PlatformMediaSession::MediaType presentationType() const override { return PlatformMediaSession::MediaType::WebAudio; }
-    void mayResumePlayback(bool shouldResume) override;
-    void suspendPlayback() override;
-    bool canReceiveRemoteControlCommands() const override { return false; }
-    void didReceiveRemoteControlCommand(PlatformMediaSession::RemoteControlCommandType, const PlatformMediaSession::RemoteCommandArgument*) override { }
-    bool supportsSeeking() const override { return false; }
-    bool shouldOverrideBackgroundPlaybackRestriction(PlatformMediaSession::InterruptionType) const override { return false; }
-    bool canProduceAudio() const final { return true; }
-    bool isSuspended() const final;
-
-    void visibilityStateChanged() final;
-
-    bool isAudioContext() const final { return true; }
-    bool isWebKitAudioContext() const final { return false; }
-
-    void handleDirtyAudioSummingJunctions();
-    void handleDirtyAudioNodeOutputs();
-
-    void addReaction(State, DOMPromiseDeferred<void>&&);
-    void updateAutomaticPullNodes();
-
-#if !RELEASE_LOG_DISABLED
-    const char* logClassName() const final { return "AudioContext"; }
-
-    Ref<Logger> m_logger;
-    const void* m_logIdentifier;
-    uint64_t m_nextAudioNodeIdentifier { 0 };
-    uint64_t m_nextAudioParameterIdentifier { 0 };
-#endif
-
-    // Only accessed in the audio thread.
-    Vector<AudioNode*> m_finishedNodes;
-
-    // We don't use RefPtr<AudioNode> here because AudioNode has a more complex ref() / deref() implementation
-    // with an optional argument for refType.  We need to use the special refType: RefTypeConnection
-    // Either accessed when the graph lock is held, or on the main thread when the audio thread has finished.
-    Vector<AudioNode*> m_referencedNodes;
-
-    // Accumulate nodes which need to be deleted here.
-    // This is copied to m_nodesToDelete at the end of a render cycle in handlePostRenderTasks(), where we're assured of a stable graph
-    // state which will have no references to any of the nodes in m_nodesToDelete once the context lock is released
-    // (when handlePostRenderTasks() has completed).
-    Vector<AudioNode*> m_nodesMarkedForDeletion;
-
-    // They will be scheduled for deletion (on the main thread) at the end of a render cycle (in realtime thread).
-    Vector<AudioNode*> m_nodesToDelete;
-
-    bool m_isDeletionScheduled { false };
-    bool m_isStopScheduled { false };
-    bool m_isInitialized { false };
-    bool m_isAudioThreadFinished { false };
-    bool m_automaticPullNodesNeedUpdating { false };
-    bool m_isOfflineContext { false };
-
-    // Only accessed when the graph lock is held.
-    HashSet<AudioSummingJunction*> m_dirtySummingJunctions;
-    HashSet<AudioNodeOutput*> m_dirtyAudioNodeOutputs;
-
-    // For the sake of thread safety, we maintain a seperate Vector of automatic pull nodes for rendering in m_renderingAutomaticPullNodes.
-    // It will be copied from m_automaticPullNodes by updateAutomaticPullNodes() at the very start or end of the rendering quantum.
-    HashSet<AudioNode*> m_automaticPullNodes;
-    Vector<AudioNode*> m_renderingAutomaticPullNodes;
-    // Only accessed in the audio thread.
-    Vector<AudioNode*> m_deferredFinishDerefList;
-    Vector<Vector<DOMPromiseDeferred<void>>> m_stateReactions;
-
-    std::unique_ptr<PlatformMediaSession> m_mediaSession;
-    UniqueRef<MainThreadGenericEventQueue> m_eventQueue;
-
-    RefPtr<AudioBuffer> m_renderTarget;
-    RefPtr<AudioDestinationNode> m_destinationNode;
-    RefPtr<AudioListener> m_listener;
-
-    unsigned m_connectionCount { 0 };
-
-    // Graph locking.
-    Lock m_contextGraphMutex;
-    // FIXME: Using volatile seems incorrect.
-    // https://bugs.webkit.org/show_bug.cgi?id=180332
-    Thread* volatile m_audioThread { nullptr };
-    Thread* volatile m_graphOwnerThread { nullptr }; // if the lock is held then this is the thread which owns it, otherwise == nullptr.
-
-    std::unique_ptr<AsyncAudioDecoder> m_audioDecoder;
-
-    // This is considering 32 is large enough for multiple channels audio. 
-    // It is somewhat arbitrary and could be increased if necessary.
-    enum { MaxNumberOfChannels = 32 };
-
-    // Number of AudioBufferSourceNodes that are active (playing).
-    std::atomic<int> m_activeSourceCount { 0 };
-
-    BehaviorRestrictions m_restrictions { NoRestrictions };
-
-    State m_state { State::Suspended };
-    RefPtr<PendingActivity<AudioContext>> m_pendingActivity;
 };
 
-inline AudioContext::State AudioContext::state() const
-{
-    return m_state;
-}
-
 } // WebCore
-
-SPECIALIZE_TYPE_TRAITS_BEGIN(WebCore::AudioContext)
-    static bool isType(const WebCore::AudioContextBase& context) { return context.isAudioContext(); }
-SPECIALIZE_TYPE_TRAITS_END()
index 9e35246..6993a0a 100644 (file)
     Conditional=WEB_AUDIO,
     EnabledBySetting=WebAudio&ModernUnprefixedWebAudio,
     ExportMacro=WEBCORE_EXPORT,
-] interface AudioContext : EventTarget {
+] interface AudioContext : BaseAudioContext {
     [CallWith=Document, MayThrowException] constructor();
 
-    // All rendered audio ultimately connects to destination, which represents the audio hardware.
-    readonly attribute AudioDestinationNode destination;
-
-    // All scheduled times are relative to this time in seconds.
-    readonly attribute unrestricted double currentTime;
-
-    // All AudioNodes in the context run at this sample-rate (sample-frames per second).
-    readonly attribute unrestricted float sampleRate;
-
-    // All panning is relative to this listener.
-    readonly attribute AudioListener listener;
+    // FIXME: Add support.
+    // readonly attribute double baseLatency;
+    // readonly attribute double outputLatency;
+    // AudioTimestamp getOutputTimestamp();
 
     [ImplementedAs=suspendRendering] Promise<void> suspend();
     [ImplementedAs=resumeRendering] Promise<void> resume();
     Promise<void> close();
 
-    readonly attribute AudioContextState state;
-    attribute EventHandler onstatechange;
-
-    // Number of AudioBufferSourceNodes that are currently playing.
-    readonly attribute unsigned long activeSourceCount;
-    
-    [MayThrowException] AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long numberOfFrames, unrestricted float sampleRate);
-    [MayThrowException] AudioBuffer createBuffer(ArrayBuffer buffer, boolean mixToMono);
-
-    // Asynchronous audio file data decoding.
-    // FIXME: successCallback should be optional and the callbacks should not be nullable. This should also return a Promise.
-    void decodeAudioData(ArrayBuffer audioData, AudioBufferCallback? successCallback, optional AudioBufferCallback? errorCallback);
-
-    // Sources
-    [MayThrowException] AudioBufferSourceNode createBufferSource();
-
     [Conditional=VIDEO, MayThrowException] MediaElementAudioSourceNode createMediaElementSource(HTMLMediaElement mediaElement);
-
     [Conditional=MEDIA_STREAM, MayThrowException] MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream);
     [Conditional=MEDIA_STREAM, MayThrowException] MediaStreamAudioDestinationNode createMediaStreamDestination();
 
-    // Processing nodes
-    [MayThrowException] GainNode createGain();
-    [MayThrowException] DelayNode createDelay(optional unrestricted double maxDelayTime = 1);
-    [MayThrowException] BiquadFilterNode createBiquadFilter();
-    [MayThrowException] WaveShaperNode createWaveShaper();
-    [MayThrowException] PannerNode createPanner();
-    [MayThrowException] ConvolverNode createConvolver();
-    [MayThrowException] DynamicsCompressorNode createDynamicsCompressor();
-    [MayThrowException] AnalyserNode createAnalyser();
-    [MayThrowException] ScriptProcessorNode createScriptProcessor(optional unsigned long bufferSize = 0, optional unsigned long numberOfInputChannels = 2, optional unsigned long numberOfOutputChannels = 2);
-    [MayThrowException] OscillatorNode createOscillator();
-    [MayThrowException] PeriodicWave createPeriodicWave(Float32Array real, Float32Array imag);
-
-    // Channel splitting and merging
-    [MayThrowException] ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs = 6);
-    [MayThrowException] ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs = 6);
-
-    // Offline rendering
-    // void prepareOfflineBufferRendering(unsigned long numberOfChannels, unsigned long numberOfFrames, unrestricted float sampleRate);
-    attribute EventHandler oncomplete;
-    void startRendering();
+    // FIXME: Add support.
+    // MediaStreamTrackAudioSourceNode createMediaStreamTrackSource(MediaStreamTrack mediaStreamTrack);
 };
index e554377..22e0546 100644 (file)
@@ -531,11 +531,11 @@ void AudioNode::deref(RefType refType)
         context().deleteMarkedNodes();
 }
 
-Variant<RefPtr<AudioContext>, RefPtr<WebKitAudioContext>> AudioNode::contextForBindings() const
+Variant<RefPtr<BaseAudioContext>, RefPtr<WebKitAudioContext>> AudioNode::contextForBindings() const
 {
     if (m_context->isWebKitAudioContext())
         return makeRefPtr(static_cast<WebKitAudioContext&>(m_context.get()));
-    return makeRefPtr(static_cast<AudioContext&>(m_context.get()));
+    return makeRefPtr(static_cast<BaseAudioContext&>(m_context.get()));
 }
 
 void AudioNode::finishDeref(RefType refType)
index a9881bd..bfd917b 100644 (file)
 
 namespace WebCore {
 
-class AudioContext;
 class AudioContextBase;
 class AudioNodeInput;
 class AudioNodeOutput;
 class AudioParam;
+class BaseAudioContext;
 class WebKitAudioContext;
 
 // An AudioNode is the basic building block for handling audio within an AudioContext.
@@ -65,7 +65,7 @@ public:
     AudioContextBase& context() { return m_context.get(); }
     const AudioContextBase& context() const { return m_context.get(); }
 
-    Variant<RefPtr<AudioContext>, RefPtr<WebKitAudioContext>> contextForBindings() const;
+    Variant<RefPtr<BaseAudioContext>, RefPtr<WebKitAudioContext>> contextForBindings() const;
 
     enum NodeType {
         NodeTypeUnknown,
index 71939c8..636a95c 100644 (file)
@@ -25,7 +25,7 @@
 [
     Conditional=WEB_AUDIO,
 ] interface AudioNode : EventTarget {
-    [ImplementedAs=contextForBindings] readonly attribute (AudioContext or WebKitAudioContext) context;
+    [ImplementedAs=contextForBindings] readonly attribute (BaseAudioContext or WebKitAudioContext) context;
     readonly attribute unsigned long numberOfInputs;
     readonly attribute unsigned long numberOfOutputs;
 
diff --git a/Source/WebCore/Modules/webaudio/BaseAudioContext.cpp b/Source/WebCore/Modules/webaudio/BaseAudioContext.cpp
new file mode 100644 (file)
index 0000000..c81fe9a
--- /dev/null
@@ -0,0 +1,1297 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1.  Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(WEB_AUDIO)
+
+#include "AudioContext.h"
+
+#include "AnalyserNode.h"
+#include "AsyncAudioDecoder.h"
+#include "AudioBuffer.h"
+#include "AudioBufferCallback.h"
+#include "AudioBufferSourceNode.h"
+#include "AudioListener.h"
+#include "AudioNodeInput.h"
+#include "AudioNodeOutput.h"
+#include "AudioSession.h"
+#include "BiquadFilterNode.h"
+#include "ChannelMergerNode.h"
+#include "ChannelSplitterNode.h"
+#include "ConvolverNode.h"
+#include "DefaultAudioDestinationNode.h"
+#include "DelayNode.h"
+#include "Document.h"
+#include "DynamicsCompressorNode.h"
+#include "EventNames.h"
+#include "FFTFrame.h"
+#include "Frame.h"
+#include "FrameLoader.h"
+#include "GainNode.h"
+#include "GenericEventQueue.h"
+#include "HRTFDatabaseLoader.h"
+#include "HRTFPanner.h"
+#include "JSDOMPromiseDeferred.h"
+#include "Logging.h"
+#include "NetworkingContext.h"
+#include "OfflineAudioCompletionEvent.h"
+#include "OfflineAudioDestinationNode.h"
+#include "OscillatorNode.h"
+#include "Page.h"
+#include "PannerNode.h"
+#include "PeriodicWave.h"
+#include "PlatformMediaSessionManager.h"
+#include "ScriptController.h"
+#include "ScriptProcessorNode.h"
+#include "WaveShaperNode.h"
+#include <JavaScriptCore/ScriptCallStack.h>
+
+#if DEBUG_AUDIONODE_REFERENCES
+#include <stdio.h>
+#endif
+
+#if USE(GSTREAMER)
+#include "GStreamerCommon.h"
+#endif
+
+#if PLATFORM(IOS_FAMILY)
+#include "ScriptController.h"
+#include "Settings.h"
+#endif
+
+#include <JavaScriptCore/ArrayBuffer.h>
+#include <wtf/Atomics.h>
+#include <wtf/IsoMallocInlines.h>
+#include <wtf/MainThread.h>
+#include <wtf/Ref.h>
+#include <wtf/RefCounted.h>
+#include <wtf/Scope.h>
+#include <wtf/text/WTFString.h>
+
+const unsigned MaxPeriodicWaveLength = 4096;
+
+namespace WebCore {
+
+WTF_MAKE_ISO_ALLOCATED_IMPL(AudioContextBase);
+WTF_MAKE_ISO_ALLOCATED_IMPL(BaseAudioContext);
+
+#define RELEASE_LOG_IF_ALLOWED(fmt, ...) RELEASE_LOG_IF(document() && document()->page() && document()->page()->isAlwaysOnLoggingAllowed(), Media, "%p - BaseAudioContext::" fmt, this, ##__VA_ARGS__)
+    
+// Returns true if |sampleRate| is within the range this implementation can render (44.1kHz-96kHz).
+bool BaseAudioContext::isSampleRateRangeGood(float sampleRate)
+{
+    // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
+    // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
+    return sampleRate >= 44100 && sampleRate <= 96000;
+}
+
+unsigned BaseAudioContext::s_hardwareContextCount = 0;
+
+// Common base-class constructor: ties the context's lifetime to |document| as an ActiveDOMObject.
+AudioContextBase::AudioContextBase(Document& document)
+    : ActiveDOMObject(document)
+{
+}
+
+// Constructor for rendering to the audio hardware.
+BaseAudioContext::BaseAudioContext(Document& document)
+    : AudioContextBase(document)
+#if !RELEASE_LOG_DISABLED
+    , m_logger(document.logger())
+    , m_logIdentifier(uniqueLogIdentifier())
+#endif
+    , m_mediaSession(PlatformMediaSession::create(PlatformMediaSessionManager::sharedManager(), *this))
+    , m_eventQueue(MainThreadGenericEventQueue::create(*this))
+{
+    // According to spec AudioContext must die only after page navigate.
+    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
+    makePendingActivity();
+
+    constructCommon();
+
+    m_destinationNode = DefaultAudioDestinationNode::create(*this);
+
+    // Initialize the destination node's muted state to match the page's current muted state.
+    pageMutedStateDidChange();
+
+    document.addAudioProducer(*this);
+    document.registerForVisibilityStateChangedCallbacks(*this);
+}
+
+// Constructor for offline (non-realtime) rendering.
+BaseAudioContext::BaseAudioContext(Document& document, AudioBuffer* renderTarget)
+    : AudioContextBase(document)
+#if !RELEASE_LOG_DISABLED
+    , m_logger(document.logger())
+    , m_logIdentifier(uniqueLogIdentifier())
+#endif
+    , m_isOfflineContext(true)
+    , m_mediaSession(PlatformMediaSession::create(PlatformMediaSessionManager::sharedManager(), *this))
+    , m_eventQueue(MainThreadGenericEventQueue::create(*this))
+    , m_renderTarget(renderTarget)
+{
+    constructCommon();
+
+    // Create a new destination for offline rendering.
+    m_destinationNode = OfflineAudioDestinationNode::create(*this, m_renderTarget.get());
+}
+
+// Initialization shared by the realtime and offline constructors: FFT tables, the
+// AudioListener, and behavior restrictions derived from document/platform policy.
+void BaseAudioContext::constructCommon()
+{
+    FFTFrame::initialize();
+    
+    m_listener = AudioListener::create();
+
+    ASSERT(document());
+    // Honor the document's autoplay policy: require a user gesture before audio may start.
+    if (document()->audioPlaybackRequiresUserGesture())
+        addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
+    else
+        m_restrictions = NoRestrictions;
+
+#if PLATFORM(COCOA)
+    addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
+#endif
+}
+
+BaseAudioContext::~BaseAudioContext()
+{
+#if DEBUG_AUDIONODE_REFERENCES
+    fprintf(stderr, "%p: BaseAudioContext::~AudioContext()\n", this);
+#endif
+    ASSERT(!m_isInitialized);
+    ASSERT(m_isStopScheduled);
+    ASSERT(m_nodesToDelete.isEmpty());
+    ASSERT(m_referencedNodes.isEmpty());
+    ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion fails on tests sometimes.
+    ASSERT(m_automaticPullNodes.isEmpty());
+    if (m_automaticPullNodesNeedUpdating)
+        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
+    ASSERT(m_renderingAutomaticPullNodes.isEmpty());
+    // FIXME: Can we assert that m_deferredFinishDerefList is empty?
+
+    if (!isOfflineContext() && scriptExecutionContext()) {
+        document()->removeAudioProducer(*this);
+        document()->unregisterForVisibilityStateChangedCallbacks(*this);
+    }
+}
+
+// Performs deferred one-time initialization: brings up the destination node and, for
+// realtime contexts, starts the audio rendering thread. Safe to call repeatedly.
+void BaseAudioContext::lazyInitialize()
+{
+    ASSERT(!m_isStopScheduled);
+
+    if (m_isInitialized)
+        return;
+
+    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
+    ASSERT(!m_isAudioThreadFinished);
+    if (m_isAudioThreadFinished)
+        return;
+
+    if (m_destinationNode) {
+        m_destinationNode->initialize();
+
+        if (!isOfflineContext()) {
+            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
+            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
+            // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
+            // We may want to consider requiring it for symmetry with OfflineAudioContext.
+            startRendering();
+            // Count live hardware-backed contexts (balanced by the decrement in uninitialize()).
+            ++s_hardwareContextCount;
+        }
+    }
+    m_isInitialized = true;
+}
+
+// Releases the destination node and drains all node-deletion queues, then drops the
+// pending activity taken in the constructor so the context can be garbage collected.
+void BaseAudioContext::clear()
+{
+    // Keep |this| alive for the duration; deleteMarkedNodes() can drop references to us.
+    auto protectedThis = makeRef(*this);
+
+    // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
+    if (m_destinationNode)
+        m_destinationNode = nullptr;
+
+    // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
+    // Loop because deleting nodes can mark further nodes for deletion.
+    do {
+        deleteMarkedNodes();
+        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
+        m_nodesMarkedForDeletion.clear();
+    } while (m_nodesToDelete.size());
+
+    clearPendingActivity();
+}
+
+// Tears down rendering: stops the audio thread via the destination node, moves realtime
+// contexts to the Closed state, and releases still-playing source nodes. Main thread only.
+void BaseAudioContext::uninitialize()
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+
+    if (!m_isInitialized)
+        return;
+
+    // This stops the audio thread and all audio rendering.
+    if (m_destinationNode)
+        m_destinationNode->uninitialize();
+
+    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
+    m_isAudioThreadFinished = true;
+
+    if (!isOfflineContext()) {
+        // Balance the increment done in lazyInitialize() for hardware-backed contexts.
+        ASSERT(s_hardwareContextCount);
+        --s_hardwareContextCount;
+
+        // Offline contexts move to 'Closed' state when dispatching the completion event.
+        setState(State::Closed);
+    }
+
+    // Get rid of the sources which may still be playing.
+    derefUnfinishedSourceNodes();
+
+    m_isInitialized = false;
+}
+
+bool BaseAudioContext::isInitialized() const
+{
+    return m_isInitialized;
+}
+
+void BaseAudioContext::addReaction(State state, DOMPromiseDeferred<void>&& promise)
+{
+    size_t stateIndex = static_cast<size_t>(state);
+    if (stateIndex >= m_stateReactions.size())
+        m_stateReactions.grow(stateIndex + 1);
+
+    m_stateReactions[stateIndex].append(WTFMove(promise));
+}
+
+// Transitions the context to |state|: fires a "statechange" event and resolves any
+// promises queued via addReaction() for that state. No-op if the state is unchanged.
+void BaseAudioContext::setState(State state)
+{
+    if (m_state == state)
+        return;
+
+    m_state = state;
+    m_eventQueue->enqueueEvent(Event::create(eventNames().statechangeEvent, Event::CanBubble::Yes, Event::IsCancelable::No));
+
+    // m_stateReactions is grown lazily by addReaction(); an out-of-range index means
+    // no promise is waiting on this state.
+    size_t stateIndex = static_cast<size_t>(state);
+    if (stateIndex >= m_stateReactions.size())
+        return;
+
+    // Swap out the pending promises before resolving so reentrant additions are not lost.
+    Vector<DOMPromiseDeferred<void>> reactions;
+    m_stateReactions[stateIndex].swap(reactions);
+
+    for (auto& promise : reactions)
+        promise.resolve();
+}
+
+// ActiveDOMObject stop: shuts the context down permanently. Idempotent, because the
+// script execution context usually calls stop twice.
+void BaseAudioContext::stop()
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+
+    // Usually ScriptExecutionContext calls stop twice.
+    if (m_isStopScheduled)
+        return;
+    m_isStopScheduled = true;
+
+    ASSERT(document());
+    document()->updateIsPlayingMedia();
+
+    uninitialize();
+    clear();
+}
+
+void BaseAudioContext::suspend(ReasonForSuspension)
+{
+    if (state() == State::Running) {
+        m_mediaSession->beginInterruption(PlatformMediaSession::PlaybackSuspended);
+        document()->updateIsPlayingMedia();
+    }
+}
+
+void BaseAudioContext::resume()
+{
+    if (state() == State::Interrupted) {
+        m_mediaSession->endInterruption(PlatformMediaSession::MayResumePlaying);
+        document()->updateIsPlayingMedia();
+    }
+}
+
+const char* BaseAudioContext::activeDOMObjectName() const
+{
+    return "AudioContext";
+}
+
+Document* AudioContextBase::document() const
+{
+    return downcast<Document>(m_scriptExecutionContext);
+}
+
+DocumentIdentifier BaseAudioContext::hostingDocumentIdentifier() const
+{
+    auto* document = downcast<Document>(m_scriptExecutionContext);
+    return document ? document->identifier() : DocumentIdentifier { };
+}
+
+bool BaseAudioContext::isSuspended() const
+{
+    return !document() || document()->activeDOMObjectsAreSuspended() || document()->activeDOMObjectsAreStopped();
+}
+
+void BaseAudioContext::visibilityStateChanged()
+{
+    // Do not suspend if audio is audible.
+    if (!document() || mediaState() == MediaProducer::IsPlayingAudio || m_isStopScheduled)
+        return;
+
+    if (document()->hidden()) {
+        if (state() == State::Running) {
+            RELEASE_LOG_IF_ALLOWED("visibilityStateChanged() Suspending playback after going to the background");
+            m_mediaSession->beginInterruption(PlatformMediaSession::EnteringBackground);
+        }
+    } else {
+        if (state() == State::Interrupted) {
+            RELEASE_LOG_IF_ALLOWED("visibilityStateChanged() Resuming playback after entering foreground");
+            m_mediaSession->endInterruption(PlatformMediaSession::MayResumePlaying);
+        }
+    }
+}
+
+bool BaseAudioContext::wouldTaintOrigin(const URL& url) const
+{
+    if (url.protocolIsData())
+        return false;
+
+    if (auto* document = this->document())
+        return !document->securityOrigin().canRequest(url);
+
+    return false;
+}
+
+ExceptionOr<Ref<AudioBuffer>> BaseAudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
+{
+    auto audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
+    if (!audioBuffer)
+        return Exception { NotSupportedError };
+    return audioBuffer.releaseNonNull();
+}
+
+ExceptionOr<Ref<AudioBuffer>> BaseAudioContext::createBuffer(ArrayBuffer& arrayBuffer, bool mixToMono)
+{
+    auto audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer.data(), arrayBuffer.byteLength(), mixToMono, sampleRate());
+    if (!audioBuffer)
+        return Exception { SyntaxError };
+    return audioBuffer.releaseNonNull();
+}
+
+void BaseAudioContext::decodeAudioData(Ref<ArrayBuffer>&& audioData, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback)
+{
+    if (!m_audioDecoder)
+        m_audioDecoder = makeUnique<AsyncAudioDecoder>();
+    m_audioDecoder->decodeAsync(WTFMove(audioData), sampleRate(), WTFMove(successCallback), WTFMove(errorCallback));
+}
+
+ExceptionOr<Ref<AudioBufferSourceNode>> BaseAudioContext::createBufferSource()
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+
+    ASSERT(isMainThread());
+
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    lazyInitialize();
+    Ref<AudioBufferSourceNode> node = AudioBufferSourceNode::create(*this, sampleRate());
+
+    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
+    // When this happens, AudioScheduledSourceNode::finish() calls BaseAudioContext::notifyNodeFinishedProcessing().
+    refNode(node);
+
+    return node;
+}
+
+ExceptionOr<Ref<ScriptProcessorNode>> BaseAudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels)
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    lazyInitialize();
+
+    // W3C Editor's Draft 06 June 2017
+    //  https://webaudio.github.io/web-audio-api/#widl-BaseAudioContext-createScriptProcessor-ScriptProcessorNode-unsigned-long-bufferSize-unsigned-long-numberOfInputChannels-unsigned-long-numberOfOutputChannels
+
+    // The bufferSize parameter determines the buffer size in units of sample-frames. If it's not passed in,
+    // or if the value is 0, then the implementation will choose the best buffer size for the given environment,
+    // which will be constant power of 2 throughout the lifetime of the node. ... If the value of this parameter
+    // is not one of the allowed power-of-2 values listed above, an IndexSizeError must be thrown.
+    switch (bufferSize) {
+    case 0:
+#if USE(AUDIO_SESSION)
+        // Pick a value between 256 (2^8) and 16384 (2^14), based on the buffer size of the current AudioSession:
+        bufferSize = 1 << std::max<size_t>(8, std::min<size_t>(14, std::log2(AudioSession::sharedSession().bufferSize())));
+#else
+        bufferSize = 2048;
+#endif
+        break;
+    case 256:
+    case 512:
+    case 1024:
+    case 2048:
+    case 4096:
+    case 8192:
+    case 16384:
+        break;
+    default:
+        return Exception { IndexSizeError };
+    }
+
+    // An IndexSizeError exception must be thrown if bufferSize or numberOfInputChannels or numberOfOutputChannels
+    // are outside the valid range. It is invalid for both numberOfInputChannels and numberOfOutputChannels to be zero.
+    // In this case an IndexSizeError must be thrown.
+
+    if (!numberOfInputChannels && !numberOfOutputChannels)
+        return Exception { NotSupportedError };
+
+    // This parameter [numberOfInputChannels] determines the number of channels for this node's input. Values of
+    // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.
+
+    if (numberOfInputChannels > maxNumberOfChannels())
+        return Exception { NotSupportedError };
+
+    // This parameter [numberOfOutputChannels] determines the number of channels for this node's output. Values of
+    // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.
+
+    if (numberOfOutputChannels > maxNumberOfChannels())
+        return Exception { NotSupportedError };
+
+    auto node = ScriptProcessorNode::create(*this, sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
+
+    refNode(node); // context keeps reference until we stop making javascript rendering callbacks
+    return node;
+}
+
+ExceptionOr<Ref<BiquadFilterNode>> BaseAudioContext::createBiquadFilter()
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    lazyInitialize();
+
+    return BiquadFilterNode::create(*this, sampleRate());
+}
+
+ExceptionOr<Ref<WaveShaperNode>> BaseAudioContext::createWaveShaper()
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    lazyInitialize();
+    return WaveShaperNode::create(*this);
+}
+
+ExceptionOr<Ref<PannerNode>> BaseAudioContext::createPanner()
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    lazyInitialize();
+    return PannerNode::create(*this, sampleRate());
+}
+
+ExceptionOr<Ref<ConvolverNode>> BaseAudioContext::createConvolver()
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    lazyInitialize();
+    return ConvolverNode::create(*this, sampleRate());
+}
+
+ExceptionOr<Ref<DynamicsCompressorNode>> BaseAudioContext::createDynamicsCompressor()
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    lazyInitialize();
+    return DynamicsCompressorNode::create(*this, sampleRate());
+}
+
+ExceptionOr<Ref<AnalyserNode>> BaseAudioContext::createAnalyser()
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    lazyInitialize();
+    return AnalyserNode::create(*this, sampleRate());
+}
+
+ExceptionOr<Ref<GainNode>> BaseAudioContext::createGain()
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    lazyInitialize();
+    return GainNode::create(*this, sampleRate());
+}
+
+ExceptionOr<Ref<DelayNode>> BaseAudioContext::createDelay(double maxDelayTime)
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    lazyInitialize();
+    return DelayNode::create(*this, sampleRate(), maxDelayTime);
+}
+
+ExceptionOr<Ref<ChannelSplitterNode>> BaseAudioContext::createChannelSplitter(size_t numberOfOutputs)
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    lazyInitialize();
+    auto node = ChannelSplitterNode::create(*this, sampleRate(), numberOfOutputs);
+    if (!node)
+        return Exception { IndexSizeError };
+    return node.releaseNonNull();
+}
+
+ExceptionOr<Ref<ChannelMergerNode>> BaseAudioContext::createChannelMerger(size_t numberOfInputs)
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    lazyInitialize();
+    auto node = ChannelMergerNode::create(*this, sampleRate(), numberOfInputs);
+    if (!node)
+        return Exception { IndexSizeError };
+    return node.releaseNonNull();
+}
+
+ExceptionOr<Ref<OscillatorNode>> BaseAudioContext::createOscillator()
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    lazyInitialize();
+
+    Ref<OscillatorNode> node = OscillatorNode::create(*this, sampleRate());
+
+    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
+    // When this happens, AudioScheduledSourceNode::finish() calls BaseAudioContext::notifyNodeFinishedProcessing().
+    refNode(node);
+
+    return node;
+}
+
+ExceptionOr<Ref<PeriodicWave>> BaseAudioContext::createPeriodicWave(Float32Array& real, Float32Array& imaginary)
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    
+    ASSERT(isMainThread());
+    if (m_isStopScheduled)
+        return Exception { InvalidStateError };
+
+    if (real.length() != imaginary.length() || (real.length() > MaxPeriodicWaveLength) || !real.length())
+        return Exception { IndexSizeError };
+    lazyInitialize();
+    return PeriodicWave::create(sampleRate(), real, imaginary);
+}
+
+void BaseAudioContext::notifyNodeFinishedProcessing(AudioNode* node)
+{
+    ASSERT(isAudioThread());
+    m_finishedNodes.append(node);
+}
+
+void BaseAudioContext::derefFinishedSourceNodes()
+{
+    ASSERT(isGraphOwner());
+    ASSERT(isAudioThread() || isAudioThreadFinished());
+    for (auto& node : m_finishedNodes)
+        derefNode(*node);
+
+    m_finishedNodes.clear();
+}
+
+void BaseAudioContext::refNode(AudioNode& node)
+{
+    ASSERT(isMainThread());
+    AutoLocker locker(*this);
+    
+    node.ref(AudioNode::RefTypeConnection);
+    m_referencedNodes.append(&node);
+}
+
+void BaseAudioContext::derefNode(AudioNode& node)
+{
+    ASSERT(isGraphOwner());
+    
+    node.deref(AudioNode::RefTypeConnection);
+
+    ASSERT(m_referencedNodes.contains(&node));
+    m_referencedNodes.removeFirst(&node);
+}
+
+void BaseAudioContext::derefUnfinishedSourceNodes()
+{
+    ASSERT(isMainThread() && isAudioThreadFinished());
+    for (auto& node : m_referencedNodes)
+        node->deref(AudioNode::RefTypeConnection);
+
+    m_referencedNodes.clear();
+}
+
+// Acquires the graph mutex from the main thread. Reentrant: if this thread already owns
+// the graph, |mustReleaseLock| is set false so the caller does not double-unlock.
+void BaseAudioContext::lock(bool& mustReleaseLock)
+{
+    // Don't allow regular lock in real-time audio thread.
+    ASSERT(isMainThread());
+
+    Thread& thisThread = Thread::current();
+
+    if (&thisThread == m_graphOwnerThread) {
+        // We already have the lock.
+        mustReleaseLock = false;
+    } else {
+        // Acquire the lock.
+        m_contextGraphMutex.lock();
+        m_graphOwnerThread = &thisThread;
+        mustReleaseLock = true;
+    }
+}
+
+// Non-blocking acquisition of the graph mutex, intended for the real-time audio thread
+// (which must never block on the main thread). Returns true if the lock was obtained;
+// |mustReleaseLock| tells the caller whether it is responsible for unlocking.
+bool BaseAudioContext::tryLock(bool& mustReleaseLock)
+{
+    Thread& thisThread = Thread::current();
+    bool isAudioThread = &thisThread == audioThread();
+
+    // Try to catch cases of using try lock on main thread - it should use regular lock.
+    ASSERT(isAudioThread || isAudioThreadFinished());
+    
+    if (!isAudioThread) {
+        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
+        lock(mustReleaseLock);
+        return true;
+    }
+    
+    bool hasLock;
+    
+    if (&thisThread == m_graphOwnerThread) {
+        // Thread already has the lock.
+        hasLock = true;
+        mustReleaseLock = false;
+    } else {
+        // Don't already have the lock - try to acquire it.
+        hasLock = m_contextGraphMutex.tryLock();
+        
+        if (hasLock)
+            m_graphOwnerThread = &thisThread;
+
+        mustReleaseLock = hasLock;
+    }
+    
+    return hasLock;
+}
+
+void BaseAudioContext::unlock()
+{
+    ASSERT(m_graphOwnerThread == &Thread::current());
+
+    m_graphOwnerThread = nullptr;
+    m_contextGraphMutex.unlock();
+}
+
+bool BaseAudioContext::isAudioThread() const
+{
+    return m_audioThread == &Thread::current();
+}
+
+bool BaseAudioContext::isGraphOwner() const
+{
+    return m_graphOwnerThread == &Thread::current();
+}
+
+void BaseAudioContext::addDeferredFinishDeref(AudioNode* node)
+{
+    ASSERT(isAudioThread());
+    m_deferredFinishDerefList.append(node);
+}
+
+void BaseAudioContext::handlePreRenderTasks()
+{
+    ASSERT(isAudioThread());
+
+    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
+    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
+    bool mustReleaseLock;
+    if (tryLock(mustReleaseLock)) {
+        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
+        handleDirtyAudioSummingJunctions();
+        handleDirtyAudioNodeOutputs();
+
+        updateAutomaticPullNodes();
+
+        if (mustReleaseLock)
+            unlock();
+    }
+}
+
+void BaseAudioContext::handlePostRenderTasks()
+{
+    ASSERT(isAudioThread());
+
+    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
+    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
+    // from the render graph (in which case they'll render silence).
+    bool mustReleaseLock;
+    if (tryLock(mustReleaseLock)) {
+        // Take care of finishing any derefs where the tryLock() failed previously.
+        handleDeferredFinishDerefs();
+
+        // Dynamically clean up nodes which are no longer needed.
+        derefFinishedSourceNodes();
+
+        // Don't delete in the real-time thread. Let the main thread do it.
+        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
+        scheduleNodeDeletion();
+
+        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
+        handleDirtyAudioSummingJunctions();
+        handleDirtyAudioNodeOutputs();
+
+        updateAutomaticPullNodes();
+
+        if (mustReleaseLock)
+            unlock();
+    }
+}
+
+void BaseAudioContext::handleDeferredFinishDerefs()
+{
+    ASSERT(isAudioThread() && isGraphOwner());
+    for (auto& node : m_deferredFinishDerefList)
+        node->finishDeref(AudioNode::RefTypeConnection);
+    
+    m_deferredFinishDerefList.clear();
+}
+
+void BaseAudioContext::markForDeletion(AudioNode& node)
+{
+    ASSERT(isGraphOwner());
+
+    if (isAudioThreadFinished())
+        m_nodesToDelete.append(&node);
+    else
+        m_nodesMarkedForDeletion.append(&node);
+
+    // This is probably the best time for us to remove the node from automatic pull list,
+    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
+    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
+    // modify m_renderingAutomaticPullNodes.
+    removeAutomaticPullNode(node);
+}
+
+// Hands nodes marked for deletion on the audio thread over to the main thread for actual
+// destruction (AudioNode members may not be safe to release on the real-time thread).
+// Must be called while holding the graph lock.
+void BaseAudioContext::scheduleNodeDeletion()
+{
+    bool isGood = m_isInitialized && isGraphOwner();
+    ASSERT(isGood);
+    if (!isGood)
+        return;
+
+    // Make sure to call deleteMarkedNodes() on main thread.    
+    // m_isDeletionScheduled ensures at most one deletion task is in flight at a time.
+    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
+        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
+        m_nodesMarkedForDeletion.clear();
+
+        m_isDeletionScheduled = true;
+
+        callOnMainThread([protectedThis = makeRef(*this)]() mutable {
+            protectedThis->deleteMarkedNodes();
+        });
+    }
+}
+
+// Destroys all nodes queued in m_nodesToDelete, first scrubbing their inputs/outputs out
+// of the dirty summing-junction and output sets so no dangling pointers remain. Runs on
+// the main thread under the graph lock.
+void BaseAudioContext::deleteMarkedNodes()
+{
+    ASSERT(isMainThread());
+
+    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
+    auto protectedThis = makeRef(*this);
+    {
+        AutoLocker locker(*this);
+
+        while (m_nodesToDelete.size()) {
+            AudioNode* node = m_nodesToDelete.takeLast();
+
+            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
+            unsigned numberOfInputs = node->numberOfInputs();
+            for (unsigned i = 0; i < numberOfInputs; ++i)
+                m_dirtySummingJunctions.remove(node->input(i));
+
+            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
+            unsigned numberOfOutputs = node->numberOfOutputs();
+            for (unsigned i = 0; i < numberOfOutputs; ++i)
+                m_dirtyAudioNodeOutputs.remove(node->output(i));
+
+            // Finally, delete it.
+            delete node;
+        }
+        m_isDeletionScheduled = false;
+    }
+}
+
+void BaseAudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
+{
+    ASSERT(isGraphOwner());    
+    m_dirtySummingJunctions.add(summingJunction);
+}
+
+void BaseAudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
+{
+    ASSERT(isMainThread());
+    AutoLocker locker(*this);
+    m_dirtySummingJunctions.remove(summingJunction);
+}
+
+EventTargetInterface BaseAudioContext::eventTargetInterface() const
+{
+    return BaseAudioContextEventTargetInterfaceType;
+}
+
+// Queues a node output whose rendering state must be refreshed in
+// handleDirtyAudioNodeOutputs(). Caller must hold the graph lock.
+void BaseAudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
+{
+    ASSERT(isGraphOwner());
+    m_dirtyAudioNodeOutputs.add(output);
+}
+
+// Updates the rendering state of every dirty summing junction, then empties
+// the dirty set. Caller must hold the graph lock.
+void BaseAudioContext::handleDirtyAudioSummingJunctions()
+{
+    ASSERT(isGraphOwner());    
+
+    for (auto& junction : m_dirtySummingJunctions)
+        junction->updateRenderingState();
+
+    m_dirtySummingJunctions.clear();
+}
+
+// Updates the rendering state of every dirty node output, then empties
+// the dirty set. Caller must hold the graph lock.
+void BaseAudioContext::handleDirtyAudioNodeOutputs()
+{
+    ASSERT(isGraphOwner());    
+
+    for (auto& output : m_dirtyAudioNodeOutputs)
+        output->updateRenderingState();
+
+    m_dirtyAudioNodeOutputs.clear();
+}
+
+// Registers a node that must be pulled each render quantum even when it has no
+// downstream connections. Caller must hold the graph lock.
+void BaseAudioContext::addAutomaticPullNode(AudioNode& node)
+{
+    ASSERT(isGraphOwner());
+
+    // Only flag a rebuild of the rendering copy if the node was not already registered.
+    if (m_automaticPullNodes.add(&node).isNewEntry)
+        m_automaticPullNodesNeedUpdating = true;
+}
+
+// Unregisters a node from the automatic-pull set. Caller must hold the graph lock.
+void BaseAudioContext::removeAutomaticPullNode(AudioNode& node)
+{
+    ASSERT(isGraphOwner());
+
+    // Only flag a rebuild of the rendering copy if the node was actually registered.
+    if (m_automaticPullNodes.remove(&node))
+        m_automaticPullNodesNeedUpdating = true;
+}
+
+// Rebuilds m_renderingAutomaticPullNodes (the copy the render thread iterates)
+// from m_automaticPullNodes when the set has changed. Caller must hold the graph lock.
+void BaseAudioContext::updateAutomaticPullNodes()
+{
+    ASSERT(isGraphOwner());
+
+    if (m_automaticPullNodesNeedUpdating) {
+        // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
+        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
+
+        unsigned i = 0;
+        for (auto& output : m_automaticPullNodes)
+            m_renderingAutomaticPullNodes[i++] = output;
+
+        m_automaticPullNodesNeedUpdating = false;
+    }
+}
+
+// Audio thread: pulls every registered automatic-pull node for this render
+// quantum, so nodes without downstream connections still get processed.
+void BaseAudioContext::processAutomaticPullNodes(size_t framesToProcess)
+{
+    ASSERT(isAudioThread());
+
+    for (auto& node : m_renderingAutomaticPullNodes)
+        node->processIfNecessary(framesToProcess);
+}
+
+// EventTarget: disambiguates in favor of ActiveDOMObject's script execution context.
+ScriptExecutionContext* AudioContextBase::scriptExecutionContext() const
+{
+    return ActiveDOMObject::scriptExecutionContext();
+}
+
+// Kicks off rendering when a scheduled source node is about to start while the
+// context is still gated on a user gesture.
+void BaseAudioContext::nodeWillBeginPlayback()
+{
+    // Called by scheduled AudioNodes when clients schedule their start times.
+    // Prior to the introduction of suspend(), resume(), and stop(), starting
+    // a scheduled AudioNode would remove the user-gesture restriction, if present,
+    // and would thus unmute the context. Now that AudioContext stays in the
+    // "suspended" state if a user-gesture restriction is present, starting a
+    // schedule AudioNode should set the state to "running", but only if the
+    // user-gesture restriction is set.
+    if (userGestureRequiredForAudioStart())
+        startRendering();
+}
+
+// Autoplay policy: a document may start Web Audio if it is processing a media
+// user gesture or capturing, or if a site quirk allows any prior interaction
+// with the top document to count.
+static bool shouldDocumentAllowWebAudioToAutoPlay(const Document& document)
+{
+    if (document.processingUserGestureForMedia() || document.isCapturing())
+        return true;
+    return document.quirks().shouldAutoplayWebAudioForArbitraryUserGesture() && document.topDocument().hasHadUserInteraction();
+}
+
+// Checks gesture/page-consent restrictions and asks the media session for
+// permission to start playing. Returns false (without starting) when any
+// restriction still applies; lifts restrictions that are satisfied.
+bool BaseAudioContext::willBeginPlayback()
+{
+    auto* document = this->document();
+    if (!document)
+        return false;
+
+    if (userGestureRequiredForAudioStart()) {
+        if (!shouldDocumentAllowWebAudioToAutoPlay(*document)) {
+            ALWAYS_LOG(LOGIDENTIFIER, "returning false, not processing user gesture or capturing");
+            return false;
+        }
+        removeBehaviorRestriction(BaseAudioContext::RequireUserGestureForAudioStartRestriction);
+    }
+
+    if (pageConsentRequiredForAudioStart()) {
+        auto* page = document->page();
+        if (page && !page->canStartMedia()) {
+            // Retry via mediaCanStart() once the page allows media to start.
+            document->addMediaCanStartListener(*this);
+            ALWAYS_LOG(LOGIDENTIFIER, "returning false, page doesn't allow media to start");
+            return false;
+        }
+        removeBehaviorRestriction(BaseAudioContext::RequirePageConsentForAudioStartRestriction);
+    }
+    
+    auto willBegin = m_mediaSession->clientWillBeginPlayback();
+    ALWAYS_LOG(LOGIDENTIFIER, "returning ", willBegin);
+    
+    return willBegin;
+}
+
+// Counterpart of willBeginPlayback() for pausing: checks the same restrictions,
+// then asks the media session for permission to pause.
+bool BaseAudioContext::willPausePlayback()
+{
+    auto* document = this->document();
+    if (!document)
+        return false;
+
+    if (userGestureRequiredForAudioStart()) {
+        if (!document->processingUserGestureForMedia())
+            return false;
+        removeBehaviorRestriction(BaseAudioContext::RequireUserGestureForAudioStartRestriction);
+    }
+
+    if (pageConsentRequiredForAudioStart()) {
+        auto* page = document->page();
+        if (page && !page->canStartMedia()) {
+            // Retry via mediaCanStart() once the page allows media to start.
+            document->addMediaCanStartListener(*this);
+            return false;
+        }
+        removeBehaviorRestriction(BaseAudioContext::RequirePageConsentForAudioStartRestriction);
+    }
+    
+    return m_mediaSession->clientWillPausePlayback();
+}
+
+// Starts the destination node rendering and moves the context to Running,
+// provided the context is not being stopped and playback restrictions allow it.
+void BaseAudioContext::startRendering()
+{
+    ALWAYS_LOG(LOGIDENTIFIER);
+    if (m_isStopScheduled || !willBeginPlayback())
+        return;
+
+    // Keep the wrapper (and this object) alive while rendering is in flight.
+    makePendingActivity();
+
+    destination()->startRendering();
+    setState(State::Running);
+}
+
+// MediaCanStartListener: the page now permits media, so drop the page-consent
+// restriction and try resuming playback.
+void BaseAudioContext::mediaCanStart(Document& document)
+{
+    ASSERT_UNUSED(document, &document == this->document());
+    removeBehaviorRestriction(BaseAudioContext::RequirePageConsentForAudioStartRestriction);
+    mayResumePlayback(true);
+}
+
+// MediaProducer: reports IsPlayingAudio only while the destination node is
+// actively playing and the context is not being torn down.
+MediaProducer::MediaStateFlags BaseAudioContext::mediaState() const
+{
+    if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
+        return MediaProducer::IsPlayingAudio;
+
+    return MediaProducer::IsNotPlaying;
+}
+
+// MediaProducer: propagates the page's audio-muted state to the destination node.
+void BaseAudioContext::pageMutedStateDidChange()
+{
+    if (m_destinationNode && document() && document()->page())
+        m_destinationNode->setMuted(document()->page()->isAudioMuted());
+}
+
+// Notifies the document that the "is playing media" state may have changed.
+void BaseAudioContext::isPlayingAudioDidChange()
+{
+    // Make sure to call Document::updateIsPlayingMedia() on the main thread, since
+    // we could be on the audio I/O thread here and the call into WebCore could block.
+    callOnMainThread([protectedThis = makeRef(*this)] {
+        if (protectedThis->document())
+            protectedThis->document()->updateIsPlayingMedia();
+    });
+}
+
+// Offline-context completion: closes the context and, on success, dispatches an
+// OfflineAudioCompletionEvent carrying the rendered buffer. On every early-exit
+// path the pending activity is cleared so the context can be garbage collected.
+void BaseAudioContext::finishedRendering(bool didRendering)
+{
+    ASSERT(isOfflineContext());
+    ASSERT(isMainThread());
+    if (!isMainThread())
+        return;
+
+    auto clearPendingActivityIfExitEarly = WTF::makeScopeExit([this] {
+        clearPendingActivity();
+    });
+
+
+    ALWAYS_LOG(LOGIDENTIFIER);
+
+    if (!didRendering)
+        return;
+
+    AudioBuffer* renderedBuffer = m_renderTarget.get();
+    setState(State::Closed);
+
+    ASSERT(renderedBuffer);
+    if (!renderedBuffer)
+        return;
+
+    // Avoid firing the event if the document has already gone away.
+    if (m_isStopScheduled)
+        return;
+
+    // Keep the pending activity until the completion event has been dispatched;
+    // dispatchEvent() clears it afterwards.
+    clearPendingActivityIfExitEarly.release();
+    m_eventQueue->enqueueEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
+}
+
+// EventTarget: after dispatching the offline completion event, release the
+// pending activity that finishedRendering() kept alive for it.
+void BaseAudioContext::dispatchEvent(Event& event)
+{
+    EventTarget::dispatchEvent(event);
+    if (event.eventInterface() == OfflineAudioCompletionEventInterfaceType)
+        clearPendingActivity();
+}
+
+// Bumps the count of actively playing source nodes (m_activeSourceCount is atomic).
+void BaseAudioContext::incrementActiveSourceCount()
+{
+    ++m_activeSourceCount;
+}
+
+// Drops the count of actively playing source nodes (m_activeSourceCount is atomic).
+void BaseAudioContext::decrementActiveSourceCount()
+{
+    --m_activeSourceCount;
+}
+
+// Implements the suspend() promise: rejects for offline/stopping contexts,
+// resolves immediately if already suspended, otherwise queues the promise to
+// settle when the context reaches the Suspended state.
+void BaseAudioContext::suspendRendering(DOMPromiseDeferred<void>&& promise)
+{
+    if (isOfflineContext() || m_isStopScheduled) {
+        promise.reject(InvalidStateError);
+        return;
+    }
+
+    if (m_state == State::Suspended) {
+        promise.resolve();
+        return;
+    }
+
+    if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
+        promise.reject();
+        return;
+    }
+
+    // The promise settles once setState(State::Suspended) runs.
+    addReaction(State::Suspended, WTFMove(promise));
+
+    if (!willPausePlayback())
+        return;
+
+    lazyInitialize();
+
+    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
+        setState(State::Suspended);
+    });
+}
+
+// Implements the resume() promise: rejects for offline/stopping contexts,
+// resolves immediately if already running, otherwise queues the promise to
+// settle when the context reaches the Running state.
+void BaseAudioContext::resumeRendering(DOMPromiseDeferred<void>&& promise)
+{
+    if (isOfflineContext() || m_isStopScheduled) {
+        promise.reject(InvalidStateError);
+        return;
+    }
+
+    if (m_state == State::Running) {
+        promise.resolve();
+        return;
+    }
+
+    if (m_state == State::Closed || !m_destinationNode) {
+        promise.reject();
+        return;
+    }
+
+    // The promise settles once setState(State::Running) runs.
+    addReaction(State::Running, WTFMove(promise));
+
+    if (!willBeginPlayback())
+        return;
+
+    lazyInitialize();
+
+    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
+        setState(State::Running);
+    });
+}
+
+// PlatformMediaSessionClient: suspends the destination node, distinguishing a
+// session interruption (State::Interrupted) from an ordinary suspension.
+void BaseAudioContext::suspendPlayback()
+{
+    if (!m_destinationNode || m_state == State::Closed)
+        return;
+
+    if (m_state == State::Suspended) {
+        if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
+            setState(State::Interrupted);
+        return;
+    }
+
+    lazyInitialize();
+
+    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
+        // Re-check the session state in the completion handler; an interruption
+        // may have begun while the suspension was in flight.
+        bool interrupted = m_mediaSession->state() == PlatformMediaSession::Interrupted;
+        setState(interrupted ? State::Interrupted : State::Suspended);
+    });
+}
+
+// PlatformMediaSessionClient: resumes the destination node if allowed;
+// otherwise settles back into the Suspended state.
+void BaseAudioContext::mayResumePlayback(bool shouldResume)
+{
+    if (!m_destinationNode || m_state == State::Closed || m_state == State::Running)
+        return;
+
+    if (!shouldResume) {
+        setState(State::Suspended);
+        return;
+    }
+
+    if (!willBeginPlayback())
+        return;
+
+    lazyInitialize();
+
+    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
+        setState(State::Running);
+    });
+}
+
+// Posts a task to the script execution context, dropping it if the context is
+// already scheduled to stop.
+void BaseAudioContext::postTask(WTF::Function<void()>&& task)
+{
+    if (m_isStopScheduled)
+        return;
+
+    m_scriptExecutionContext->postTask(WTFMove(task));
+}
+
+// Returns the security origin of the script execution context, or null if
+// the context has gone away.
+const SecurityOrigin* BaseAudioContext::origin() const
+{
+    return m_scriptExecutionContext ? m_scriptExecutionContext->securityOrigin() : nullptr;
+}
+
+// Forwards a console message to the script execution context, if still alive.
+void BaseAudioContext::addConsoleMessage(MessageSource source, MessageLevel level, const String& message)
+{
+    if (m_scriptExecutionContext)
+        m_scriptExecutionContext->addConsoleMessage(source, level, message);
+}
+
+// Releases the pending activity (and the balancing self-reference) taken by
+// makePendingActivity(). No-op if none is outstanding.
+void BaseAudioContext::clearPendingActivity()
+{
+    if (!m_pendingActivity)
+        return;
+    m_pendingActivity = nullptr;
+    // FIXME: Remove this specific deref() and ref() call in makePendingActivity().
+    deref();
+}
+
+// Takes a pending activity (plus a self-reference, see the FIXME in
+// clearPendingActivity()) to keep the wrapper alive while audio work is pending.
+void BaseAudioContext::makePendingActivity()
+{
+    if (m_pendingActivity)
+        return;
+    m_pendingActivity = ActiveDOMObject::makePendingActivity(*this);
+    ref();
+}
+
+#if !RELEASE_LOG_DISABLED
+// LoggerHelper: all BaseAudioContext logging goes to the Media channel.
+WTFLogChannel& BaseAudioContext::logChannel() const
+{
+    return LogMedia;
+}
+#endif
+
+} // namespace WebCore
+
+#endif // ENABLE(WEB_AUDIO)
diff --git a/Source/WebCore/Modules/webaudio/BaseAudioContext.h b/Source/WebCore/Modules/webaudio/BaseAudioContext.h
new file mode 100644 (file)
index 0000000..70d4752
--- /dev/null
@@ -0,0 +1,519 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ * Copyright (C) 2016-2020 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1.  Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ActiveDOMObject.h"
+#include "AsyncAudioDecoder.h"
+#include "AudioBus.h"
+#include "AudioContextState.h"
+#include "AudioDestinationNode.h"
+#include "EventTarget.h"
+#include "MediaCanStartListener.h"
+#include "MediaProducer.h"
+#include "PlatformMediaSession.h"
+#include "ScriptExecutionContext.h"
+#include "VisibilityChangeClient.h"
+#include <JavaScriptCore/ConsoleTypes.h>
+#include <JavaScriptCore/Float32Array.h>
+#include <atomic>
+#include <wtf/HashSet.h>
+#include <wtf/LoggerHelper.h>
+#include <wtf/MainThread.h>
+#include <wtf/RefPtr.h>
+#include <wtf/ThreadSafeRefCounted.h>
+#include <wtf/Threading.h>
+#include <wtf/UniqueRef.h>
+#include <wtf/Vector.h>
+#include <wtf/text/AtomStringHash.h>
+
+namespace WebCore {
+
+class AnalyserNode;
+class AudioBuffer;
+class AudioBufferCallback;
+class AudioBufferSourceNode;
+class AudioListener;
+class AudioSummingJunction;
+class BiquadFilterNode;
+class ChannelMergerNode;
+class ChannelSplitterNode;
+class ConvolverNode;
+class DelayNode;
+class Document;
+class DynamicsCompressorNode;
+class GainNode;
+class HTMLMediaElement;
+class MainThreadGenericEventQueue;
+class MediaElementAudioSourceNode;
+class MediaStream;
+class MediaStreamAudioDestinationNode;
+class MediaStreamAudioSourceNode;
+class OscillatorNode;
+class PannerNode;
+class PeriodicWave;
+class ScriptProcessorNode;
+class SecurityOrigin;
+class WaveShaperNode;
+
+template<typename IDLType> class DOMPromiseDeferred;
+
+// Abstract base shared by BaseAudioContext and the legacy WebKit audio context.
+// It declares the interface AudioNodes use to talk to their owning context
+// (graph locking, dirty-state tracking, node lifetime, logging).
+// FIXME: We need to rename this now that there is also BaseAudioContext.
+class AudioContextBase
+    : public ActiveDOMObject
+    , public ThreadSafeRefCounted<AudioContextBase>
+    , public EventTargetWithInlineData
+    , public MediaCanStartListener
+    , public MediaProducer
+#if !RELEASE_LOG_DISABLED
+    , public LoggerHelper
+#endif
+{
+    WTF_MAKE_ISO_ALLOCATED(AudioContextBase);
+public:
+    virtual ~AudioContextBase() = default;
+
+    // Reconcile ref/deref which are defined both in ThreadSafeRefCounted and EventTarget.
+    using ThreadSafeRefCounted::ref;
+    using ThreadSafeRefCounted::deref;
+
+    Document* document() const;
+
+    virtual bool isInitialized() const = 0;
+
+    virtual size_t currentSampleFrame() const = 0;
+    virtual float sampleRate() const = 0;
+    virtual double currentTime() const = 0;
+    virtual bool isGraphOwner() const = 0;
+
+    virtual void setAudioThread(Thread&) = 0;
+    virtual bool isAudioThread() const = 0;
+    virtual bool isAudioThreadFinished() = 0;
+
+    virtual void isPlayingAudioDidChange() = 0;
+    virtual void nodeWillBeginPlayback() = 0;
+
+    virtual void postTask(WTF::Function<void()>&&) = 0;
+    virtual bool isStopped() const = 0;
+    virtual const SecurityOrigin* origin() const = 0;
+    virtual void addConsoleMessage(MessageSource, MessageLevel, const String& message) = 0;
+
+    // Node lifetime management (see BaseAudioContext for the concrete scheme).
+    virtual void markForDeletion(AudioNode&) = 0;
+    virtual void deleteMarkedNodes() = 0;
+
+    virtual void handlePreRenderTasks() = 0;
+    virtual void handlePostRenderTasks() = 0;
+    virtual void processAutomaticPullNodes(size_t framesToProcess) = 0;
+    virtual void addDeferredFinishDeref(AudioNode*) = 0;
+
+    // Dirty-state tracking for rendering-graph updates.
+    virtual void removeMarkedSummingJunction(AudioSummingJunction*) = 0;
+    virtual void markSummingJunctionDirty(AudioSummingJunction*) = 0;
+    virtual void markAudioNodeOutputDirty(AudioNodeOutput*) = 0;
+
+    enum BehaviorRestrictionFlags {
+        NoRestrictions = 0,
+        RequireUserGestureForAudioStartRestriction = 1 << 0,
+        RequirePageConsentForAudioStartRestriction = 1 << 1,
+    };
+    typedef unsigned BehaviorRestrictions;
+    virtual BehaviorRestrictions behaviorRestrictions() const = 0;
+    virtual void addBehaviorRestriction(BehaviorRestrictions) = 0;
+    virtual void removeBehaviorRestriction(BehaviorRestrictions) = 0;
+
+#if !RELEASE_LOG_DISABLED
+    virtual const void* nextAudioNodeLogIdentifier() = 0;
+    virtual const void* nextAudioParameterLogIdentifier() = 0;
+#endif
+
+    virtual void addAutomaticPullNode(AudioNode&) = 0;
+    virtual void removeAutomaticPullNode(AudioNode&) = 0;
+
+    virtual void notifyNodeFinishedProcessing(AudioNode*) = 0;
+
+    virtual void finishedRendering(bool didRendering) = 0;
+
+    virtual void incrementConnectionCount() = 0;
+    virtual void incrementActiveSourceCount() = 0;
+    virtual void decrementActiveSourceCount() = 0;
+
+    virtual bool isOfflineContext() const = 0;
+    virtual bool isBaseAudioContext() const = 0;
+    virtual bool isWebKitAudioContext() const = 0;
+
+    // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
+    virtual void lock(bool& mustReleaseLock) = 0;
+    virtual bool tryLock(bool& mustReleaseLock) = 0;
+    virtual void unlock() = 0;
+
+    // RAII helper that acquires the context's graph lock for its scope,
+    // releasing it in the destructor only if this instance acquired it.
+    class AutoLocker {
+    public:
+        explicit AutoLocker(AudioContextBase& context)
+            : m_context(context)
+        {
+            m_context.lock(m_mustReleaseLock);
+        }
+
+        ~AutoLocker()
+        {
+            if (m_mustReleaseLock)
+                m_context.unlock();
+        }
+
+    private:
+        AudioContextBase& m_context;
+        bool m_mustReleaseLock;
+    };
+
+    // EventTarget
+    ScriptExecutionContext* scriptExecutionContext() const final;
+    void refEventTarget() override { ref(); }
+    void derefEventTarget() override { deref(); }
+
+protected:
+    explicit AudioContextBase(Document&);
+};
+
+// AudioContext is the cornerstone of the web audio API and all AudioNodes are created from it.
+// For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism. 
+
+class BaseAudioContext
+    : public AudioContextBase
+    , private PlatformMediaSessionClient
+    , private VisibilityChangeClient
+{
+    WTF_MAKE_ISO_ALLOCATED(BaseAudioContext);
+public:
+    virtual ~BaseAudioContext();
+
+    bool isInitialized() const final;
+    
+    bool isOfflineContext() const final { return m_isOfflineContext; }
+
+    DocumentIdentifier hostingDocumentIdentifier() const final;
+
+    AudioDestinationNode* destination() { return m_destinationNode.get(); }
+    size_t currentSampleFrame() const final { return m_destinationNode ? m_destinationNode->currentSampleFrame() : 0; }
+    double currentTime() const final { return m_destinationNode ? m_destinationNode->currentTime() : 0.; }
+    float sampleRate() const final { return m_destinationNode ? m_destinationNode->sampleRate() : 0.f; }
+    unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }
+
+    void incrementActiveSourceCount() final;
+    void decrementActiveSourceCount() final;
+    
+    ExceptionOr<Ref<AudioBuffer>> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
+    ExceptionOr<Ref<AudioBuffer>> createBuffer(ArrayBuffer&, bool mixToMono);
+
+    // Asynchronous audio file data decoding.
+    void decodeAudioData(Ref<ArrayBuffer>&&, RefPtr<AudioBufferCallback>&&, RefPtr<AudioBufferCallback>&&);
+
+    AudioListener* listener() { return m_listener.get(); }
+
+    // Back the suspend()/resume() promises exposed to JavaScript.
+    void suspendRendering(DOMPromiseDeferred<void>&&);
+    void resumeRendering(DOMPromiseDeferred<void>&&);
+
+    using State = AudioContextState;
+    State state() const { return m_state; }
+    bool isClosed() const { return m_state == State::Closed; }
+
+    bool wouldTaintOrigin(const URL&) const;
+
+    // The AudioNode create methods are called on the main thread (from JavaScript).
+    ExceptionOr<Ref<AudioBufferSourceNode>> createBufferSource();
+    ExceptionOr<Ref<GainNode>> createGain();
+    ExceptionOr<Ref<BiquadFilterNode>> createBiquadFilter();
+    ExceptionOr<Ref<WaveShaperNode>> createWaveShaper();
+    ExceptionOr<Ref<DelayNode>> createDelay(double maxDelayTime);
+    ExceptionOr<Ref<PannerNode>> createPanner();
+    ExceptionOr<Ref<ConvolverNode>> createConvolver();
+    ExceptionOr<Ref<DynamicsCompressorNode>> createDynamicsCompressor();
+    ExceptionOr<Ref<AnalyserNode>> createAnalyser();
+    ExceptionOr<Ref<ScriptProcessorNode>> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels);
+    ExceptionOr<Ref<ChannelSplitterNode>> createChannelSplitter(size_t numberOfOutputs);
+    ExceptionOr<Ref<ChannelMergerNode>> createChannelMerger(size_t numberOfInputs);
+    ExceptionOr<Ref<OscillatorNode>> createOscillator();
+    ExceptionOr<Ref<PeriodicWave>> createPeriodicWave(Float32Array& real, Float32Array& imaginary);
+
+    // When a source node has no more processing to do (has finished playing), then it tells the context to dereference it.
+    void notifyNodeFinishedProcessing(AudioNode*) final;
+
+    // Called at the start of each render quantum.
+    void handlePreRenderTasks() final;
+
+    // Called at the end of each render quantum.
+    void handlePostRenderTasks() final;
+
+    // Called periodically at the end of each render quantum to dereference finished source nodes.
+    void derefFinishedSourceNodes();
+
+    // We schedule deletion of all marked nodes at the end of each realtime render quantum.
+    void markForDeletion(AudioNode&) final;
+    void deleteMarkedNodes() final;
+
+    // AudioContext can pull node(s) at the end of each render quantum even when they are not connected to any downstream nodes.
+    // These two methods are called by the nodes who want to add/remove themselves into/from the automatic pull lists.
+    void addAutomaticPullNode(AudioNode&) final;
+    void removeAutomaticPullNode(AudioNode&) final;
+
+    // Called right before handlePostRenderTasks() to handle nodes which need to be pulled even when they are not connected to anything.
+    void processAutomaticPullNodes(size_t framesToProcess) final;
+
+    // Keeps track of the number of connections made.
+    void incrementConnectionCount() final
+    {
+        ASSERT(isMainThread());
+        m_connectionCount++;
+    }
+
+    unsigned connectionCount() const { return m_connectionCount; }
+
+    //
+    // Thread Safety and Graph Locking:
+    //
+    
+    void setAudioThread(Thread& thread) final { m_audioThread = &thread; } // FIXME: check either not initialized or the same
+    Thread* audioThread() const { return m_audioThread; }
+    bool isAudioThread() const final;
+
+    // Returns true only after the audio thread has been started and then shutdown.
+    bool isAudioThreadFinished() final { return m_isAudioThreadFinished; }
+    
+    // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
+    void lock(bool& mustReleaseLock) final;
+
+    // Returns true if we own the lock.
+    // mustReleaseLock is set to true if we acquired the lock in this method call and caller must unlock(), false if it was previously acquired.
+    bool tryLock(bool& mustReleaseLock) final;
+
+    void unlock() final;
+
+    // Returns true if this thread owns the context's lock.
+    bool isGraphOwner() const final;
+
+    // Returns the maximum number of channels we can support.
+    static unsigned maxNumberOfChannels() { return MaxNumberOfChannels; }
+    
+    // In AudioNode::deref() a tryLock() is used for calling finishDeref(), but if it fails keep track here.
+    void addDeferredFinishDeref(AudioNode*) final;
+
+    // In the audio thread at the start of each render cycle, we'll call handleDeferredFinishDerefs().
+    void handleDeferredFinishDerefs();
+
+    // Only accessed when the graph lock is held.
+    void markSummingJunctionDirty(AudioSummingJunction*) final;
+    void markAudioNodeOutputDirty(AudioNodeOutput*) final;
+
+    // Must be called on main thread.
+    void removeMarkedSummingJunction(AudioSummingJunction*) final;
+
+    // EventTarget
+    EventTargetInterface eventTargetInterface() const final;
+
+    void startRendering();
+    void finishedRendering(bool didRendering) final;
+
+    static unsigned s_hardwareContextCount;
+
+    // Restrictions to change default behaviors.
+    BehaviorRestrictions behaviorRestrictions() const final { return m_restrictions; }
+    void addBehaviorRestriction(BehaviorRestrictions restriction) final { m_restrictions |= restriction; }
+    void removeBehaviorRestriction(BehaviorRestrictions restriction) final { m_restrictions &= ~restriction; }
+
+    void isPlayingAudioDidChange() final;
+
+    void nodeWillBeginPlayback() final;
+
+#if !RELEASE_LOG_DISABLED
+    const Logger& logger() const final { return m_logger.get(); }
+    const void* logIdentifier() const final { return m_logIdentifier; }
+    WTFLogChannel& logChannel() const final;
+    const void* nextAudioNodeLogIdentifier() final { return childLogIdentifier(m_logIdentifier, ++m_nextAudioNodeIdentifier); }
+    const void* nextAudioParameterLogIdentifier() final { return childLogIdentifier(m_logIdentifier, ++m_nextAudioParameterIdentifier); }
+#endif
+
+    void postTask(WTF::Function<void()>&&) final;
+    bool isStopped() const final { return m_isStopScheduled; }
+    const SecurityOrigin* origin() const final;
+    void addConsoleMessage(MessageSource, MessageLevel, const String& message) final;
+
+protected:
+    explicit BaseAudioContext(Document&);
+    BaseAudioContext(Document&, AudioBuffer* renderTarget);
+    
+    static bool isSampleRateRangeGood(float sampleRate);
+    void clearPendingActivity();
+    void makePendingActivity();
+
+    AudioDestinationNode* destinationNode() const { return m_destinationNode.get(); }
+
+    void lazyInitialize();
+    void uninitialize();
+
+#if !RELEASE_LOG_DISABLED
+    const char* logClassName() const final { return "BaseAudioContext"; }
+#endif
+
+    // The context itself keeps a reference to all source nodes.  The source nodes, then reference all nodes they're connected to.
+    // In turn, these nodes reference all nodes they're connected to.  All nodes are ultimately connected to the AudioDestinationNode.
+    // When the context dereferences a source node, it will be deactivated from the rendering graph along with all other nodes it is
+    // uniquely connected to.  See the AudioNode::ref() and AudioNode::deref() methods for more details.
+    void refNode(AudioNode&);
+    void derefNode(AudioNode&);
+
+    void addReaction(State, DOMPromiseDeferred<void>&&);
+    void setState(State);
+
+private:
+    void constructCommon();
+
+    bool willBeginPlayback();
+    bool willPausePlayback();
+
+    bool userGestureRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequireUserGestureForAudioStartRestriction; }
+    bool pageConsentRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequirePageConsentForAudioStartRestriction; }
+
+    void clear();
+
+    void scheduleNodeDeletion();
+
+    void mediaCanStart(Document&) override;
+
+    // EventTarget
+    void dispatchEvent(Event&) final;
+
+    // MediaProducer
+    MediaProducer::MediaStateFlags mediaState() const override;
+    void pageMutedStateDidChange() override;
+
+    // ActiveDOMObject API.
+    void suspend(ReasonForSuspension) final;
+    void resume() final;
+    void stop() override;
+    const char* activeDOMObjectName() const override;
+
+    // When the context goes away, there might still be some sources which haven't finished playing.
+    // Make sure to dereference them here.
+    void derefUnfinishedSourceNodes();
+
+    // PlatformMediaSessionClient
+    PlatformMediaSession::MediaType mediaType() const override { return PlatformMediaSession::MediaType::WebAudio; }
+    PlatformMediaSession::MediaType presentationType() const override { return PlatformMediaSession::MediaType::WebAudio; }
+    void mayResumePlayback(bool shouldResume) override;
+    void suspendPlayback() override;
+    bool canReceiveRemoteControlCommands() const override { return false; }
+    void didReceiveRemoteControlCommand(PlatformMediaSession::RemoteControlCommandType, const PlatformMediaSession::RemoteCommandArgument*) override { }
+    bool supportsSeeking() const override { return false; }
+    bool shouldOverrideBackgroundPlaybackRestriction(PlatformMediaSession::InterruptionType) const override { return false; }
+    bool canProduceAudio() const final { return true; }
+    bool isSuspended() const final;
+
+    void visibilityStateChanged() final;
+
+    bool isBaseAudioContext() const final { return true; }
+    bool isWebKitAudioContext() const final { return false; }
+
+    void handleDirtyAudioSummingJunctions();
+    void handleDirtyAudioNodeOutputs();
+
+    void updateAutomaticPullNodes();
+
+#if !RELEASE_LOG_DISABLED
+    Ref<Logger> m_logger;
+    const void* m_logIdentifier;
+    uint64_t m_nextAudioNodeIdentifier { 0 };
+    uint64_t m_nextAudioParameterIdentifier { 0 };
+#endif
+
+    // Only accessed in the audio thread.
+    Vector<AudioNode*> m_finishedNodes;
+
+    // We don't use RefPtr<AudioNode> here because AudioNode has a more complex ref() / deref() implementation
+    // with an optional argument for refType.  We need to use the special refType: RefTypeConnection
+    // Either accessed when the graph lock is held, or on the main thread when the audio thread has finished.
+    Vector<AudioNode*> m_referencedNodes;
+
+    // Accumulate nodes which need to be deleted here.
+    // This is copied to m_nodesToDelete at the end of a render cycle in handlePostRenderTasks(), where we're assured of a stable graph
+    // state which will have no references to any of the nodes in m_nodesToDelete once the context lock is released
+    // (when handlePostRenderTasks() has completed).
+    Vector<AudioNode*> m_nodesMarkedForDeletion;
+
+    // They will be scheduled for deletion (on the main thread) at the end of a render cycle (in realtime thread).
+    Vector<AudioNode*> m_nodesToDelete;
+
+    bool m_isDeletionScheduled { false };
+    bool m_isStopScheduled { false };
+    bool m_isInitialized { false };
+    bool m_isAudioThreadFinished { false };
+    bool m_automaticPullNodesNeedUpdating { false };
+    bool m_isOfflineContext { false };
+
+    // Only accessed when the graph lock is held.
+    HashSet<AudioSummingJunction*> m_dirtySummingJunctions;
+    HashSet<AudioNodeOutput*> m_dirtyAudioNodeOutputs;
+
+    // For the sake of thread safety, we maintain a separate Vector of automatic pull nodes for rendering in m_renderingAutomaticPullNodes.
+    // It will be copied from m_automaticPullNodes by updateAutomaticPullNodes() at the very start or end of the rendering quantum.
+    HashSet<AudioNode*> m_automaticPullNodes;
+    Vector<AudioNode*> m_renderingAutomaticPullNodes;
+    // Only accessed in the audio thread.
+    Vector<AudioNode*> m_deferredFinishDerefList;
+    // Promises queued by addReaction(), settled when the context reaches the
+    // corresponding State (see suspendRendering()/resumeRendering()).
+    Vector<Vector<DOMPromiseDeferred<void>>> m_stateReactions;
+
+    std::unique_ptr<PlatformMediaSession> m_mediaSession;
+    UniqueRef<MainThreadGenericEventQueue> m_eventQueue;
+
+    RefPtr<AudioBuffer> m_renderTarget;
+    RefPtr<AudioDestinationNode> m_destinationNode;
+    RefPtr<AudioListener> m_listener;
+
+    unsigned m_connectionCount { 0 };
+
+    // Graph locking.
+    Lock m_contextGraphMutex;
+    // FIXME: Using volatile seems incorrect.
+    // https://bugs.webkit.org/show_bug.cgi?id=180332
+    Thread* volatile m_audioThread { nullptr };
+    Thread* volatile m_graphOwnerThread { nullptr }; // if the lock is held then this is the thread which owns it, otherwise == nullptr.
+
+    std::unique_ptr<AsyncAudioDecoder> m_audioDecoder;
+
+    // This is considering 32 is large enough for multiple channels audio. 
+    // It is somewhat arbitrary and could be increased if necessary.
+    enum { MaxNumberOfChannels = 32 };
+
+    // Number of AudioBufferSourceNodes that are active (playing).
+    std::atomic<int> m_activeSourceCount { 0 };
+
+    BehaviorRestrictions m_restrictions { NoRestrictions };
+
+    State m_state { State::Suspended };
+    // Keeps the JS wrapper alive while work is pending; see makePendingActivity().
+    RefPtr<PendingActivity<BaseAudioContext>> m_pendingActivity;
+};
+
+} // WebCore
+
+SPECIALIZE_TYPE_TRAITS_BEGIN(WebCore::BaseAudioContext)
+    static bool isType(const WebCore::AudioContextBase& context) { return context.isBaseAudioContext(); }
+SPECIALIZE_TYPE_TRAITS_END()
diff --git a/Source/WebCore/Modules/webaudio/BaseAudioContext.idl b/Source/WebCore/Modules/webaudio/BaseAudioContext.idl
new file mode 100644 (file)
index 0000000..2291c95
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1.  Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+[
+    ActiveDOMObject,
+    Conditional=WEB_AUDIO,
+    EnabledBySetting=WebAudio&ModernUnprefixedWebAudio,
+    ExportMacro=WEBCORE_EXPORT,
+] interface BaseAudioContext : EventTarget {
+    // All rendered audio ultimately connects to destination, which represents the audio hardware.
+    readonly attribute AudioDestinationNode destination;
+
+    // All scheduled times are relative to this time in seconds.
+    readonly attribute unrestricted double currentTime;
+
+    // All AudioNodes in the context run at this sample-rate (sample-frames per second).
+    readonly attribute unrestricted float sampleRate;
+
+    // All panning is relative to this listener.
+    readonly attribute AudioListener listener;
+
+    readonly attribute AudioContextState state;
+    attribute EventHandler onstatechange;
+
+    [MayThrowException] AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long numberOfFrames, unrestricted float sampleRate);
+    [MayThrowException] AudioBuffer createBuffer(ArrayBuffer buffer, boolean mixToMono);
+
+    // Asynchronous audio file data decoding.
+    // FIXME: successCallback should be optional and the callbacks should not be nullable. This should also return a Promise.
+    void decodeAudioData(ArrayBuffer audioData, AudioBufferCallback? successCallback, optional AudioBufferCallback? errorCallback);
+
+    // Sources
+    [MayThrowException] AudioBufferSourceNode createBufferSource();
+
+    // Processing nodes
+    [MayThrowException] GainNode createGain();
+    [MayThrowException] DelayNode createDelay(optional unrestricted double maxDelayTime = 1);
+    [MayThrowException] BiquadFilterNode createBiquadFilter();
+    [MayThrowException] WaveShaperNode createWaveShaper();
+    [MayThrowException] PannerNode createPanner();
+    [MayThrowException] ConvolverNode createConvolver();
+    [MayThrowException] DynamicsCompressorNode createDynamicsCompressor();
+    [MayThrowException] AnalyserNode createAnalyser();
+    [MayThrowException] ScriptProcessorNode createScriptProcessor(optional unsigned long bufferSize = 0, optional unsigned long numberOfInputChannels = 2, optional unsigned long numberOfOutputChannels = 2);
+    [MayThrowException] OscillatorNode createOscillator();
+    [MayThrowException] PeriodicWave createPeriodicWave(Float32Array real, Float32Array imag);
+
+    // Channel splitting and merging
+    [MayThrowException] ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs = 6);
+    [MayThrowException] ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs = 6);
+};
index 68fc8d6..17a79fe 100644 (file)
@@ -37,7 +37,7 @@ namespace WebCore {
 WTF_MAKE_ISO_ALLOCATED_IMPL(OfflineAudioContext);
 
 inline OfflineAudioContext::OfflineAudioContext(Document& document, AudioBuffer* renderTarget)
-    : AudioContext(document, renderTarget)
+    : BaseAudioContext(document, renderTarget)
 {
 }
 
index 85a595f..acbabb7 100644 (file)
 
 #pragma once
 
-#include "AudioContext.h"
+#include "BaseAudioContext.h"
 
 namespace WebCore {
 
-class OfflineAudioContext final : public AudioContext {
+class OfflineAudioContext final : public BaseAudioContext {
     WTF_MAKE_ISO_ALLOCATED(OfflineAudioContext);
 public:
     static ExceptionOr<Ref<OfflineAudioContext>> create(ScriptExecutionContext&, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
index 0e71a06..729c759 100644 (file)
     Conditional=WEB_AUDIO,
     EnabledBySetting=ModernUnprefixedWebAudio,
     JSGenerateToJSObject,
-] interface OfflineAudioContext : AudioContext {
+] interface OfflineAudioContext : BaseAudioContext {
     [CallWith=ScriptExecutionContext, MayThrowException] constructor(unsigned long numberOfChannels, unsigned long numberOfFrames, unrestricted float sampleRate);
+
+    void startRendering();
+
+    [ImplementedAs=resumeRendering] Promise<void> resume();
+    [ImplementedAs=suspendRendering] Promise<void> suspend(); // FIXME: Missing suspendTime parameter.
+
+    // FIXME: Add support.
+    // readonly attribute unsigned long length;
+
+    attribute EventHandler oncomplete;
 };
index f3800f1..99ebc5c 100644 (file)
@@ -53,7 +53,7 @@ PannerNodeBase::PannerNodeBase(AudioContextBase& context, float sampleRate)
 {
 }
 
-PannerNode::PannerNode(AudioContext& context, float sampleRate)
+PannerNode::PannerNode(BaseAudioContext& context, float sampleRate)
     : PannerNodeBase(context, sampleRate)
     , m_panningModel(PanningModelType::HRTF)
     , m_lastGain(-1.0)
index f2ff2d2..407ca41 100644 (file)
@@ -62,15 +62,15 @@ protected:
 class PannerNode final : public PannerNodeBase {
     WTF_MAKE_ISO_ALLOCATED(PannerNode);
 public:
-    static Ref<PannerNode> create(AudioContext& context, float sampleRate)
+    static Ref<PannerNode> create(BaseAudioContext& context, float sampleRate)
     {
         return adoptRef(*new PannerNode(context, sampleRate));
     }
 
     virtual ~PannerNode();
 
-    AudioContext& context() { return downcast<AudioContext>(AudioNode::context()); }
-    const AudioContext& context() const { return downcast<AudioContext>(AudioNode::context()); }
+    BaseAudioContext& context() { return downcast<BaseAudioContext>(AudioNode::context()); }
+    const BaseAudioContext& context() const { return downcast<BaseAudioContext>(AudioNode::context()); }
 
     // AudioNode
     void process(size_t framesToProcess) override;
@@ -134,7 +134,7 @@ public:
     double latencyTime() const override { return m_panner ? m_panner->latencyTime() : 0; }
 
 private:
-    PannerNode(AudioContext&, float sampleRate);
+    PannerNode(BaseAudioContext&, float sampleRate);
 
     // Returns the combined distance and cone gain attenuation.
     float distanceConeGain();
index 1446609..e9abb79 100644 (file)
@@ -320,7 +320,7 @@ private:
 
     void visibilityStateChanged() final;
 
-    bool isAudioContext() const final { return false; }
+    bool isBaseAudioContext() const final { return false; }
     bool isWebKitAudioContext() const final { return true; }
 
     void handleDirtyAudioSummingJunctions();
index 20a201d..063f8e0 100644 (file)
@@ -242,6 +242,7 @@ Modules/webaudio/AudioParamTimeline.cpp
 Modules/webaudio/AudioProcessingEvent.cpp
 Modules/webaudio/AudioScheduledSourceNode.cpp
 Modules/webaudio/AudioSummingJunction.cpp
+Modules/webaudio/BaseAudioContext.cpp
 Modules/webaudio/BiquadDSPKernel.cpp
 Modules/webaudio/BiquadFilterNode.cpp
 Modules/webaudio/BiquadProcessor.cpp
@@ -2762,6 +2763,7 @@ JSAuthenticatorAttestationResponse.cpp
 JSAuthenticatorResponse.cpp
 JSAuthenticatorTransport.cpp
 JSBarProp.cpp
+JSBaseAudioContext.cpp
 JSBasicCredential.cpp
 JSBeforeLoadEvent.cpp
 JSBeforeUnloadEvent.cpp
index 62d0f95..21528f2 100644 (file)
                83102B271F9EADD900E404B9 /* JSExtendableMessageEvent.h in Headers */ = {isa = PBXBuildFile; fileRef = 83102B231F9EADC200E404B9 /* JSExtendableMessageEvent.h */; };
                8311C0031FAA2E9500E3C8E5 /* SWServerJobQueue.h in Headers */ = {isa = PBXBuildFile; fileRef = 8311C0021FAA2E8900E3C8E5 /* SWServerJobQueue.h */; settings = {ATTRIBUTES = (Private, ); }; };
                83120C711C56F3FB001CB112 /* HTMLDataElement.h in Headers */ = {isa = PBXBuildFile; fileRef = 834B86A71C56E83A00F3F0E3 /* HTMLDataElement.h */; };
+               83198FBF24A160DD00420B05 /* BaseAudioContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 83198FBE24A160C100420B05 /* BaseAudioContext.h */; };
                831B61762499A5BB00C07C79 /* WebKitAudioPannerNode.h in Headers */ = {isa = PBXBuildFile; fileRef = 831B61742499A59C00C07C79 /* WebKitAudioPannerNode.h */; };
                831B61772499A5BF00C07C79 /* WebKitOfflineAudioContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 831B61752499A59D00C07C79 /* WebKitOfflineAudioContext.h */; };
                8321507E1F27EA1B0095B136 /* NavigatorBeacon.h in Headers */ = {isa = PBXBuildFile; fileRef = 8321507B1F27EA150095B136 /* NavigatorBeacon.h */; };
                83102B251F9EADC200E404B9 /* JSExtendableMessageEvent.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSExtendableMessageEvent.cpp; sourceTree = "<group>"; };
                8311C0001FAA2E8900E3C8E5 /* SWServerJobQueue.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SWServerJobQueue.cpp; sourceTree = "<group>"; };
                8311C0021FAA2E8900E3C8E5 /* SWServerJobQueue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SWServerJobQueue.h; sourceTree = "<group>"; };
+               83198FBB24A160C100420B05 /* BaseAudioContext.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BaseAudioContext.cpp; sourceTree = "<group>"; };
+               83198FBD24A160C100420B05 /* BaseAudioContext.idl */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = BaseAudioContext.idl; sourceTree = "<group>"; };
+               83198FBE24A160C100420B05 /* BaseAudioContext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BaseAudioContext.h; sourceTree = "<group>"; };
                831B616F2499A59800C07C79 /* WebKitAudioPannerNode.idl */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = WebKitAudioPannerNode.idl; sourceTree = "<group>"; };
                831B61712499A59900C07C79 /* WebKitAudioPannerNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WebKitAudioPannerNode.cpp; sourceTree = "<group>"; };
                831B61722499A59A00C07C79 /* WebKitOfflineAudioContext.idl */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = WebKitOfflineAudioContext.idl; sourceTree = "<group>"; };
                                FD8C46EA154608E700A5910C /* AudioScheduledSourceNode.h */,
                                FDB052DD1561A42C00B500D6 /* AudioSummingJunction.cpp */,
                                FDB052DE1561A42C00B500D6 /* AudioSummingJunction.h */,
+                               83198FBB24A160C100420B05 /* BaseAudioContext.cpp */,
+                               83198FBE24A160C100420B05 /* BaseAudioContext.h */,
+                               83198FBD24A160C100420B05 /* BaseAudioContext.idl */,
                                FD315FDA12B0267600C1A359 /* BiquadDSPKernel.cpp */,
                                FD315FDB12B0267600C1A359 /* BiquadDSPKernel.h */,
                                FDC54F011399B0DA008D9117 /* BiquadFilterNode.cpp */,
                                51A1B87D2087C4C000979A75 /* BackForwardItemIdentifier.h in Headers */,
                                BC124EE80C2641CD009E2349 /* BarProp.h in Headers */,
                                460BB6161D0A1BF000221812 /* Base64Utilities.h in Headers */,
+                               83198FBF24A160DD00420B05 /* BaseAudioContext.h in Headers */,
                                412DEF1F23A918A300D840F6 /* BaseAudioSharedUnit.h in Headers */,
                                379E61CA126CA5C400B63E8D /* BaseButtonInputType.h in Headers */,
                                379E61CC126CA5C400B63E8D /* BaseCheckableInputType.h in Headers */,
index 6703347..84c98e6 100644 (file)
@@ -53,6 +53,7 @@ namespace WebCore {
     macro(AuthenticatorAssertionResponse) \
     macro(AuthenticatorAttestationResponse) \
     macro(AuthenticatorResponse) \
+    macro(BaseAudioContext) \
     macro(BeforeLoadEvent) \
     macro(BlobEvent) \
     macro(Cache) \
index a154b44..e5b4aed 100644 (file)
@@ -5,8 +5,8 @@ useNamespaceAsSuffix=true
 AbortSignal
 ApplePaySession conditional=APPLE_PAY
 AudioNode conditional=WEB_AUDIO
-AudioContext conditional=WEB_AUDIO
 AudioTrackList conditional=VIDEO
+BaseAudioContext conditional=WEB_AUDIO
 Clipboard
 DedicatedWorkerGlobalScope
 DOMApplicationCache
index 341fef1..6721872 100644 (file)
 #endif
 
 #if ENABLE(WEB_AUDIO)
-#include "AudioContext.h"
+#include "BaseAudioContext.h"
 #include "WebKitAudioContext.h"
 #endif
 
@@ -4299,10 +4299,10 @@ void Internals::sendMediaControlEvent(MediaControlEvent event)
 #endif // ENABLE(MEDIA_SESSION)
 
 #if ENABLE(WEB_AUDIO)
-void Internals::setAudioContextRestrictions(const Variant<RefPtr<AudioContext>, RefPtr<WebKitAudioContext>>& contextVariant, StringView restrictionsString)
+void Internals::setAudioContextRestrictions(const Variant<RefPtr<BaseAudioContext>, RefPtr<WebKitAudioContext>>& contextVariant, StringView restrictionsString)
 {
     RefPtr<AudioContextBase> context;
-    switchOn(contextVariant, [&](RefPtr<AudioContext> entry) {
+    switchOn(contextVariant, [&](RefPtr<BaseAudioContext> entry) {
         context = entry;
     }, [&](RefPtr<WebKitAudioContext> entry) {
         context = entry;
index dda9c89..daa94c8 100644 (file)
@@ -51,8 +51,8 @@
 namespace WebCore {
 
 class AnimationTimeline;
-class AudioContext;
 class AudioTrack;
+class BaseAudioContext;
 class CacheStorageConnection;
 class DOMRect;
 class DOMRectList;
@@ -676,7 +676,7 @@ public:
 #endif
 
 #if ENABLE(WEB_AUDIO)
-    void setAudioContextRestrictions(const Variant<RefPtr<AudioContext>, RefPtr<WebKitAudioContext>>&, StringView restrictionsString);
+    void setAudioContextRestrictions(const Variant<RefPtr<BaseAudioContext>, RefPtr<WebKitAudioContext>>&, StringView restrictionsString);
     void useMockAudioDestinationCocoa();
 #endif
 
index 0eb0f15..a561b97 100644 (file)
@@ -677,7 +677,7 @@ enum CompositingPolicy {
     [Conditional=VIDEO, MayThrowException] void setMediaSessionRestrictions(DOMString mediaType, DOMString restrictions);
     [Conditional=VIDEO, MayThrowException] DOMString mediaSessionRestrictions(DOMString mediaType);
     [Conditional=VIDEO] void setMediaElementRestrictions(HTMLMediaElement element, DOMString restrictions);
-    [Conditional=WEB_AUDIO] void setAudioContextRestrictions((AudioContext or WebKitAudioContext) context, DOMString restrictions);
+    [Conditional=WEB_AUDIO] void setAudioContextRestrictions((BaseAudioContext or WebKitAudioContext) context, DOMString restrictions);
     [Conditional=VIDEO, MayThrowException] void postRemoteControlCommand(DOMString command, optional unrestricted float argument = 0);
     [Conditional=WIRELESS_PLAYBACK_TARGET] void setMockMediaPlaybackTargetPickerEnabled(boolean enabled);
     [Conditional=WIRELESS_PLAYBACK_TARGET, MayThrowException] void setMockMediaPlaybackTargetPickerState(DOMString deviceName, DOMString deviceState);