Handle IDLPromise<> properly
[WebKit-https.git] / Source / WebCore / Modules / webaudio / AudioContext.cpp
1 /*
2  * Copyright (C) 2010 Google Inc. All rights reserved.
3  * Copyright (C) 2016 Apple Inc. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1.  Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2.  Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
15  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17  * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
18  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27
28 #if ENABLE(WEB_AUDIO)
29
30 #include "AudioContext.h"
31
32 #include "AnalyserNode.h"
33 #include "AsyncAudioDecoder.h"
34 #include "AudioBuffer.h"
35 #include "AudioBufferCallback.h"
36 #include "AudioBufferSourceNode.h"
37 #include "AudioListener.h"
38 #include "AudioNodeInput.h"
39 #include "AudioNodeOutput.h"
40 #include "BiquadFilterNode.h"
41 #include "ChannelMergerNode.h"
42 #include "ChannelSplitterNode.h"
43 #include "ConvolverNode.h"
44 #include "DefaultAudioDestinationNode.h"
45 #include "DelayNode.h"
46 #include "Document.h"
47 #include "DynamicsCompressorNode.h"
48 #include "EventNames.h"
49 #include "ExceptionCode.h"
50 #include "FFTFrame.h"
51 #include "Frame.h"
52 #include "GainNode.h"
53 #include "GenericEventQueue.h"
54 #include "HRTFDatabaseLoader.h"
55 #include "HRTFPanner.h"
56 #include "JSDOMPromiseDeferred.h"
57 #include "Logging.h"
58 #include "NetworkingContext.h"
59 #include "OfflineAudioCompletionEvent.h"
60 #include "OfflineAudioDestinationNode.h"
61 #include "OscillatorNode.h"
62 #include "Page.h"
63 #include "PannerNode.h"
64 #include "PeriodicWave.h"
65 #include "ScriptController.h"
66 #include "ScriptProcessorNode.h"
67 #include "WaveShaperNode.h"
68 #include <inspector/ScriptCallStack.h>
69
70 #if ENABLE(MEDIA_STREAM)
71 #include "MediaStream.h"
72 #include "MediaStreamAudioDestinationNode.h"
73 #include "MediaStreamAudioSource.h"
74 #include "MediaStreamAudioSourceNode.h"
75 #endif
76
77 #if ENABLE(VIDEO)
78 #include "HTMLMediaElement.h"
79 #include "MediaElementAudioSourceNode.h"
80 #endif
81
82 #if DEBUG_AUDIONODE_REFERENCES
83 #include <stdio.h>
84 #endif
85
86 #if USE(GSTREAMER)
87 #include "GStreamerUtilities.h"
88 #endif
89
90 #if PLATFORM(IOS)
91 #include "ScriptController.h"
92 #include "Settings.h"
93 #endif
94
95 #include <runtime/ArrayBuffer.h>
96 #include <wtf/Atomics.h>
97 #include <wtf/MainThread.h>
98 #include <wtf/Ref.h>
99 #include <wtf/RefCounted.h>
100 #include <wtf/text/WTFString.h>
101
// FIXME: check the proper way to reference an undefined thread ID
const int UndefinedThreadIdentifier = 0xffffffff;

// Maximum number of elements accepted for the real/imaginary arrays in createPeriodicWave().
const unsigned MaxPeriodicWaveLength = 4096;
106
namespace WebCore {

// Release-logging helper: only emits when the hosting page allows always-on media logging.
#define RELEASE_LOG_IF_ALLOWED(fmt, ...) RELEASE_LOG_IF(document()->page() && document()->page()->isAlwaysOnLoggingAllowed(), Media, "%p - AudioContext::" fmt, this, ##__VA_ARGS__)
111 bool AudioContext::isSampleRateRangeGood(float sampleRate)
112 {
113     // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
114     // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
115     return sampleRate >= 44100 && sampleRate <= 96000;
116 }
117
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
const unsigned MaxHardwareContexts = 4;
// Count of realtime contexts currently talking to hardware; only touched on the main thread
// (see create(), lazyInitialize() and uninitialize()).
unsigned AudioContext::s_hardwareContextCount = 0;
121     
122 RefPtr<AudioContext> AudioContext::create(Document& document)
123 {
124     ASSERT(isMainThread());
125     if (s_hardwareContextCount >= MaxHardwareContexts)
126         return nullptr;
127
128     RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
129     audioContext->suspendIfNeeded();
130     return audioContext;
131 }
132
// Constructor for rendering to the audio hardware.
// Shared setup lives in constructCommon(); here we additionally create the
// default (hardware) destination node.
AudioContext::AudioContext(Document& document)
    : ActiveDOMObject(&document)
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
    , m_graphOwnerThread(UndefinedThreadIdentifier)
{
    constructCommon();

    m_destinationNode = DefaultAudioDestinationNode::create(*this);

    // Initialize the destination node's muted state to match the page's current muted state.
    pageMutedStateDidChange();
}
147
// Constructor for offline (non-realtime) rendering.
// Renders into an in-memory AudioBuffer (m_renderTarget) instead of the hardware.
AudioContext::AudioContext(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    : ActiveDOMObject(&document)
    , m_isOfflineContext(true)
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
    , m_graphOwnerThread(UndefinedThreadIdentifier)
{
    constructCommon();

    // Create a new destination for offline rendering.
    m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    m_destinationNode = OfflineAudioDestinationNode::create(*this, m_renderTarget.get());
}
162
// Initialization shared by the realtime and offline constructors.
void AudioContext::constructCommon()
{
    // According to spec AudioContext must die only after page navigate.
    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
    setPendingActivity(this);

#if USE(GSTREAMER)
    initializeGStreamer();
#endif

    FFTFrame::initialize();

    m_listener = AudioListener::create();

#if PLATFORM(IOS)
    // On iOS, audio may require a user gesture to start; honor the setting.
    if (document()->settings().audioPlaybackRequiresUserGesture())
        addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
    else
        m_restrictions = NoRestrictions;
#endif

#if PLATFORM(COCOA)
    addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
#endif
}
188
// By this point stop() must have run uninitialize()/clear() (asserted below),
// so all node bookkeeping should already be empty.
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    ASSERT(!m_isInitialized);
    ASSERT(m_isStopScheduled);
    ASSERT(m_nodesToDelete.isEmpty());
    ASSERT(m_referencedNodes.isEmpty());
    ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion fails on tests sometimes.
    ASSERT(m_automaticPullNodes.isEmpty());
    // m_automaticPullNodes was asserted empty above, so a pending update shrinks the
    // rendering copy to zero before the final assertion checks it.
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(m_renderingAutomaticPullNodes.isEmpty());
    // FIXME: Can we assert that m_deferredFinishDerefList is empty?
}
205
// Deferred one-time initialization, invoked by the node factory methods:
// initializes the destination node and, for realtime contexts, registers with
// the document and starts the audio rendering thread.
void AudioContext::lazyInitialize()
{
    if (m_isInitialized)
        return;

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    ASSERT(!m_isAudioThreadFinished);
    if (m_isAudioThreadFinished)
        return;

    if (m_destinationNode) {
        m_destinationNode->initialize();

        if (!isOfflineContext()) {
            document()->addAudioProducer(this);
            document()->registerForVisibilityStateChangedCallbacks(this);

            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
            // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
            // We may want to consider requiring it for symmetry with OfflineAudioContext.
            startRendering();
            ++s_hardwareContextCount;
        }
    }
    m_isInitialized = true;
}
233
234 void AudioContext::clear()
235 {
236     // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
237     if (m_destinationNode)
238         m_destinationNode = nullptr;
239
240     // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
241     do {
242         deleteMarkedNodes();
243         m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
244         m_nodesMarkedForDeletion.clear();
245     } while (m_nodesToDelete.size());
246
247     // It was set in constructCommon.
248     unsetPendingActivity(this);
249 }
250
// Stops the audio thread and all rendering, and undoes the registrations made
// in lazyInitialize(). Safe to call when never initialized.
void AudioContext::uninitialize()
{
    ASSERT(isMainThread());

    if (!m_isInitialized)
        return;

    // This stops the audio thread and all audio rendering.
    m_destinationNode->uninitialize();

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    m_isAudioThreadFinished = true;

    if (!isOfflineContext()) {
        document()->removeAudioProducer(this);
        document()->unregisterForVisibilityStateChangedCallbacks(this);

        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;

        // Offline contexts move to 'Closed' state when dispatching the completion event.
        setState(State::Closed);
    }

    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();

    m_isInitialized = false;
}
280
// True between lazyInitialize() and uninitialize().
bool AudioContext::isInitialized() const
{
    return m_isInitialized;
}
285
286 void AudioContext::addReaction(State state, DOMPromiseDeferred<void>&& promise)
287 {
288     size_t stateIndex = static_cast<size_t>(state);
289     if (stateIndex >= m_stateReactions.size())
290         m_stateReactions.resize(stateIndex + 1);
291
292     m_stateReactions[stateIndex].append(WTFMove(promise));
293 }
294
295 void AudioContext::setState(State state)
296 {
297     if (m_state == state)
298         return;
299
300     m_state = state;
301     m_eventQueue->enqueueEvent(Event::create(eventNames().statechangeEvent, true, false));
302
303     size_t stateIndex = static_cast<size_t>(state);
304     if (stateIndex >= m_stateReactions.size())
305         return;
306
307     Vector<DOMPromiseDeferred<void>> reactions;
308     m_stateReactions[stateIndex].swap(reactions);
309
310     for (auto& promise : reactions)
311         promise.resolve();
312 }
313
// ActiveDOMObject teardown entry point; may be invoked more than once.
void AudioContext::stop()
{
    ASSERT(isMainThread());

    // Usually ScriptExecutionContext calls stop twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;

    document()->updateIsPlayingMedia();

    m_eventQueue->close();

    // Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle
    // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
    // ActiveDOMObjects so let's schedule uninitialize() to be called later.
    // FIXME: see if there's a more direct way to handle this issue.
    // FIXME: This sounds very wrong. The whole idea of stop() is that it stops everything, and if we
    // schedule some observable work for later, the work likely happens at an inappropriate time.
    callOnMainThread([this] {
        uninitialize();
        clear();
    });
}
338
339 bool AudioContext::canSuspendForDocumentSuspension() const
340 {
341     // FIXME: We should be able to suspend while rendering as well with some more code.
342     return m_state == State::Suspended || m_state == State::Closed;
343 }
344
// Name reported for this object in ActiveDOMObject diagnostics.
const char* AudioContext::activeDOMObjectName() const
{
    return "AudioContext";
}
349
// The script execution context of an AudioContext is always a Document.
Document* AudioContext::document() const
{
    ASSERT(m_scriptExecutionContext);
    return downcast<Document>(m_scriptExecutionContext);
}
355
// Like document(), but const and without asserting a non-null context.
const Document* AudioContext::hostingDocument() const
{
    return downcast<Document>(m_scriptExecutionContext);
}
360
361 String AudioContext::sourceApplicationIdentifier() const
362 {
363     Document* document = this->document();
364     if (Frame* frame = document ? document->frame() : nullptr) {
365         if (NetworkingContext* networkingContext = frame->loader().networkingContext())
366             return networkingContext->sourceApplicationIdentifier();
367     }
368     return emptyString();
369 }
370
371 void AudioContext::visibilityStateChanged()
372 {
373     // Do not suspend if audio is audible.
374     if (mediaState() == MediaProducer::IsPlayingAudio)
375         return;
376
377     if (document()->hidden()) {
378         if (state() == State::Running) {
379             RELEASE_LOG_IF_ALLOWED("visibilityStateChanged() Suspending playback after going to the background");
380             m_mediaSession->beginInterruption(PlatformMediaSession::EnteringBackground);
381         }
382     } else {
383         if (state() == State::Interrupted) {
384             RELEASE_LOG_IF_ALLOWED("visibilityStateChanged() Resuming playback after entering foreground");
385             m_mediaSession->endInterruption(PlatformMediaSession::MayResumePlaying);
386         }
387     }
388 }
389
390 ExceptionOr<Ref<AudioBuffer>> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
391 {
392     auto audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
393     if (!audioBuffer)
394         return Exception { NOT_SUPPORTED_ERR };
395     return audioBuffer.releaseNonNull();
396 }
397
398 ExceptionOr<Ref<AudioBuffer>> AudioContext::createBuffer(ArrayBuffer& arrayBuffer, bool mixToMono)
399 {
400     auto audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer.data(), arrayBuffer.byteLength(), mixToMono, sampleRate());
401     if (!audioBuffer)
402         return Exception { SYNTAX_ERR };
403     return audioBuffer.releaseNonNull();
404 }
405
// Asynchronously decodes |audioData|; the decoder invokes one of the callbacks
// when finished.
void AudioContext::decodeAudioData(Ref<ArrayBuffer>&& audioData, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback)
{
    m_audioDecoder.decodeAsync(WTFMove(audioData), sampleRate(), WTFMove(successCallback), WTFMove(errorCallback));
}
410
411 Ref<AudioBufferSourceNode> AudioContext::createBufferSource()
412 {
413     ASSERT(isMainThread());
414     lazyInitialize();
415     Ref<AudioBufferSourceNode> node = AudioBufferSourceNode::create(*this, m_destinationNode->sampleRate());
416
417     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
418     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
419     refNode(node);
420
421     return node;
422 }
423
424 #if ENABLE(VIDEO)
425
426 ExceptionOr<Ref<MediaElementAudioSourceNode>> AudioContext::createMediaElementSource(HTMLMediaElement& mediaElement)
427 {
428     ASSERT(isMainThread());
429     lazyInitialize();
430     
431     if (mediaElement.audioSourceNode())
432         return Exception { INVALID_STATE_ERR };
433
434     auto node = MediaElementAudioSourceNode::create(*this, mediaElement);
435
436     mediaElement.setAudioSourceNode(node.ptr());
437
438     refNode(node.get()); // context keeps reference until node is disconnected
439     return WTFMove(node);
440 }
441
442 #endif
443
444 #if ENABLE(MEDIA_STREAM)
445
446 ExceptionOr<Ref<MediaStreamAudioSourceNode>> AudioContext::createMediaStreamSource(MediaStream& mediaStream)
447 {
448     ASSERT(isMainThread());
449
450     auto audioTracks = mediaStream.getAudioTracks();
451     if (audioTracks.isEmpty())
452         return Exception { INVALID_STATE_ERR };
453
454     MediaStreamTrack* providerTrack = nullptr;
455     for (auto& track : audioTracks) {
456         if (track->audioSourceProvider()) {
457             providerTrack = track.get();
458             break;
459         }
460     }
461     if (!providerTrack)
462         return Exception { INVALID_STATE_ERR };
463
464     lazyInitialize();
465
466     auto node = MediaStreamAudioSourceNode::create(*this, mediaStream, *providerTrack);
467     node->setFormat(2, sampleRate());
468
469     refNode(node); // context keeps reference until node is disconnected
470     return WTFMove(node);
471 }
472
// Creates a mono MediaStream destination node.
Ref<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
{
    // FIXME: Add support for an optional argument which specifies the number of channels.
    // FIXME: The default should probably be stereo instead of mono.
    return MediaStreamAudioDestinationNode::create(*this, 1);
}
479
480 #endif
481
482 ExceptionOr<Ref<ScriptProcessorNode>> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels)
483 {
484     ASSERT(isMainThread());
485     lazyInitialize();
486     auto node = ScriptProcessorNode::create(*this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
487
488     if (!node)
489         return Exception { INDEX_SIZE_ERR };
490
491     refNode(*node); // context keeps reference until we stop making javascript rendering callbacks
492     return node.releaseNonNull();
493 }
494
495 Ref<BiquadFilterNode> AudioContext::createBiquadFilter()
496 {
497     ASSERT(isMainThread());
498     lazyInitialize();
499     return BiquadFilterNode::create(*this, m_destinationNode->sampleRate());
500 }
501
// Creates a WaveShaperNode attached to this context.
Ref<WaveShaperNode> AudioContext::createWaveShaper()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return WaveShaperNode::create(*this);
}
508
509 Ref<PannerNode> AudioContext::createPanner()
510 {
511     ASSERT(isMainThread());
512     lazyInitialize();
513     return PannerNode::create(*this, m_destinationNode->sampleRate());
514 }
515
516 Ref<ConvolverNode> AudioContext::createConvolver()
517 {
518     ASSERT(isMainThread());
519     lazyInitialize();
520     return ConvolverNode::create(*this, m_destinationNode->sampleRate());
521 }
522
523 Ref<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
524 {
525     ASSERT(isMainThread());
526     lazyInitialize();
527     return DynamicsCompressorNode::create(*this, m_destinationNode->sampleRate());
528 }
529
530 Ref<AnalyserNode> AudioContext::createAnalyser()
531 {
532     ASSERT(isMainThread());
533     lazyInitialize();
534     return AnalyserNode::create(*this, m_destinationNode->sampleRate());
535 }
536
537 Ref<GainNode> AudioContext::createGain()
538 {
539     ASSERT(isMainThread());
540     lazyInitialize();
541     return GainNode::create(*this, m_destinationNode->sampleRate());
542 }
543
544 ExceptionOr<Ref<DelayNode>> AudioContext::createDelay(double maxDelayTime)
545 {
546     ASSERT(isMainThread());
547     lazyInitialize();
548     return DelayNode::create(*this, m_destinationNode->sampleRate(), maxDelayTime);
549 }
550
551 ExceptionOr<Ref<ChannelSplitterNode>> AudioContext::createChannelSplitter(size_t numberOfOutputs)
552 {
553     ASSERT(isMainThread());
554     lazyInitialize();
555     auto node = ChannelSplitterNode::create(*this, m_destinationNode->sampleRate(), numberOfOutputs);
556     if (!node)
557         return Exception { INDEX_SIZE_ERR };
558     return node.releaseNonNull();
559 }
560
561 ExceptionOr<Ref<ChannelMergerNode>> AudioContext::createChannelMerger(size_t numberOfInputs)
562 {
563     ASSERT(isMainThread());
564     lazyInitialize();
565     auto node = ChannelMergerNode::create(*this, m_destinationNode->sampleRate(), numberOfInputs);
566     if (!node)
567         return Exception { INDEX_SIZE_ERR };
568     return node.releaseNonNull();
569 }
570
571 Ref<OscillatorNode> AudioContext::createOscillator()
572 {
573     ASSERT(isMainThread());
574     lazyInitialize();
575
576     Ref<OscillatorNode> node = OscillatorNode::create(*this, m_destinationNode->sampleRate());
577
578     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
579     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
580     refNode(node);
581
582     return node;
583 }
584
585 ExceptionOr<Ref<PeriodicWave>> AudioContext::createPeriodicWave(Float32Array& real, Float32Array& imaginary)
586 {
587     ASSERT(isMainThread());
588     if (real.length() != imaginary.length() || (real.length() > MaxPeriodicWaveLength) || !real.length())
589         return Exception { INDEX_SIZE_ERR };
590     lazyInitialize();
591     return PeriodicWave::create(sampleRate(), real, imaginary);
592 }
593
// Audio thread: records that a scheduled source node finished playing; its
// connection reference is dropped later in derefFinishedSourceNodes().
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}
599
// Drops the connection references of nodes reported via
// notifyNodeFinishedProcessing(). Requires the graph lock.
void AudioContext::derefFinishedSourceNodes()
{
    ASSERT(isGraphOwner());
    ASSERT(isAudioThread() || isAudioThreadFinished());
    for (auto& node : m_finishedNodes)
        derefNode(*node);

    m_finishedNodes.clear();
}
609
// Takes a connection reference to |node| and tracks it so it can be released
// later (derefNode / derefUnfinishedSourceNodes). Main thread only.
void AudioContext::refNode(AudioNode& node)
{
    ASSERT(isMainThread());
    AutoLocker locker(*this);

    node.ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(&node);
}
618
// Releases the connection reference taken in refNode(). Requires the graph lock.
void AudioContext::derefNode(AudioNode& node)
{
    ASSERT(isGraphOwner());

    node.deref(AudioNode::RefTypeConnection);

    ASSERT(m_referencedNodes.contains(&node));
    m_referencedNodes.removeFirst(&node);
}
628
// Teardown helper (see uninitialize()): drops connection references of all
// still-referenced nodes once the audio thread is finished.
void AudioContext::derefUnfinishedSourceNodes()
{
    ASSERT(isMainThread() && isAudioThreadFinished());
    for (auto& node : m_referencedNodes)
        node->deref(AudioNode::RefTypeConnection);

    m_referencedNodes.clear();
}
637
// Acquires the graph mutex for the main thread. Re-acquisition by the current
// owner is a no-op; |mustReleaseLock| tells the caller whether a matching
// unlock() is needed.
void AudioContext::lock(bool& mustReleaseLock)
{
    // Don't allow regular lock in real-time audio thread.
    ASSERT(isMainThread());

    ThreadIdentifier thisThread = currentThread();

    if (thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = thisThread;
        mustReleaseLock = true;
    }
}
655
// Audio-thread variant of lock(): never blocks the realtime thread, only
// attempts acquisition. Returns true if the graph lock is held on return;
// |mustReleaseLock| indicates whether the caller must unlock().
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();

    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread || isAudioThreadFinished());

    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();

        if (hasLock)
            m_graphOwnerThread = thisThread;

        mustReleaseLock = hasLock;
    }

    return hasLock;
}
688
// Releases the graph mutex acquired via lock()/tryLock(); clears the owner
// first so isGraphOwner() is false by the time the mutex is free.
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);

    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}
696
// True when called from the realtime rendering thread.
bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}
701
// True when the calling thread currently holds the graph lock.
bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}
706
// Audio thread: queues |node| for a finishDeref() that will run later under
// the graph lock (see handleDeferredFinishDerefs()).
void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(node);
}
712
// Audio thread, start of each render quantum: pull main-thread graph changes
// into the rendering state, but only if the lock is free.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
731
// Audio thread, end of each render quantum: opportunistic cleanup of finished
// nodes and dirty rendering state, again only if the lock is free.
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
761
// Completes the finishDeref()s queued by addDeferredFinishDeref(); must hold
// the graph lock on the audio thread.
void AudioContext::handleDeferredFinishDerefs()
{
    ASSERT(isAudioThread() && isGraphOwner());
    for (auto& node : m_deferredFinishDerefList)
        node->finishDeref(AudioNode::RefTypeConnection);

    m_deferredFinishDerefList.clear();
}
770
// Queues |node| for deletion: directly onto the main-thread delete list when
// the audio thread is gone, otherwise onto the marked list drained by
// scheduleNodeDeletion(). Requires the graph lock.
void AudioContext::markForDeletion(AudioNode* node)
{
    ASSERT(isGraphOwner());

    if (isAudioThreadFinished())
        m_nodesToDelete.append(node);
    else
        m_nodesMarkedForDeletion.append(node);

    // This is probably the best time for us to remove the node from automatic pull list,
    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
    // modify m_renderingAutomaticPullNodes.
    removeAutomaticPullNode(node);
}
786
// Moves marked nodes onto the delete list and schedules deleteMarkedNodes()
// on the main thread (at most one outstanding request at a time).
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = m_isInitialized && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;

    // Make sure to call deleteMarkedNodes() on main thread.
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();

        m_isDeletionScheduled = true;

        callOnMainThread([protectedThis = makeRef(*this)]() mutable {
            protectedThis->deleteMarkedNodes();
        });
    }
}
806
// Main-thread deletion of retired nodes, performed under the graph lock so the
// dirty sets can be purged of the nodes' inputs/outputs first.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());

    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    Ref<AudioContext> protectedThis(*this);
    {
        AutoLocker locker(*this);

        while (m_nodesToDelete.size()) {
            AudioNode* node = m_nodesToDelete.takeLast();

            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));

            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));

            // Finally, delete it.
            delete node;
        }
        m_isDeletionScheduled = false;
    }
}
835
// Flags |summingJunction| for a rendering-state update during the next render
// quantum's pre/post tasks. Requires the graph lock.
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());
    m_dirtySummingJunctions.add(summingJunction);
}
841
// Main-thread removal of a junction from the dirty set (e.g. before it goes away).
void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(*this);
    m_dirtySummingJunctions.remove(summingJunction);
}
848
// Flags |output| for a rendering-state update. Requires the graph lock.
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}
854
// Applies pending rendering-state updates to dirty summing junctions.
// Requires the graph lock.
void AudioContext::handleDirtyAudioSummingJunctions()
{
    ASSERT(isGraphOwner());

    for (auto& junction : m_dirtySummingJunctions)
        junction->updateRenderingState();

    m_dirtySummingJunctions.clear();
}
864
// Applies pending rendering-state updates to dirty node outputs.
// Requires the graph lock.
void AudioContext::handleDirtyAudioNodeOutputs()
{
    ASSERT(isGraphOwner());

    for (auto& output : m_dirtyAudioNodeOutputs)
        output->updateRenderingState();

    m_dirtyAudioNodeOutputs.clear();
}
874
// Registers |node| to be pulled every render quantum; the rendering copy is
// refreshed lazily via updateAutomaticPullNodes(). Requires the graph lock.
void AudioContext::addAutomaticPullNode(AudioNode* node)
{
    ASSERT(isGraphOwner());

    if (m_automaticPullNodes.add(node).isNewEntry)
        m_automaticPullNodesNeedUpdating = true;
}
882
// Unregisters |node| from automatic pulling. Requires the graph lock.
void AudioContext::removeAutomaticPullNode(AudioNode* node)
{
    ASSERT(isGraphOwner());

    if (m_automaticPullNodes.remove(node))
        m_automaticPullNodesNeedUpdating = true;
}
890
// Refreshes the vector the audio thread iterates (m_renderingAutomaticPullNodes)
// from the authoritative set, if it changed. Requires the graph lock.
void AudioContext::updateAutomaticPullNodes()
{
    ASSERT(isGraphOwner());

    if (m_automaticPullNodesNeedUpdating) {
        // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());

        unsigned i = 0;
        for (auto& output : m_automaticPullNodes)
            m_renderingAutomaticPullNodes[i++] = output;

        m_automaticPullNodesNeedUpdating = false;
    }
}
906
// Audio thread: gives every registered automatic pull node a chance to process
// this render quantum.
void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
{
    ASSERT(isAudioThread());

    for (auto& node : m_renderingAutomaticPullNodes)
        node->processIfNecessary(framesToProcess);
}
914
915 ScriptExecutionContext* AudioContext::scriptExecutionContext() const
916 {
917     return m_isStopScheduled ? 0 : ActiveDOMObject::scriptExecutionContext();
918 }
919
920 void AudioContext::nodeWillBeginPlayback()
921 {
922     // Called by scheduled AudioNodes when clients schedule their start times.
923     // Prior to the introduction of suspend(), resume(), and stop(), starting
924     // a scheduled AudioNode would remove the user-gesture restriction, if present,
925     // and would thus unmute the context. Now that AudioContext stays in the
926     // "suspended" state if a user-gesture restriction is present, starting a
927     // schedule AudioNode should set the state to "running", but only if the
928     // user-gesture restriction is set.
929     if (userGestureRequiredForAudioStart())
930         startRendering();
931 }
932
933 bool AudioContext::willBeginPlayback()
934 {
935     if (userGestureRequiredForAudioStart()) {
936         if (!ScriptController::processingUserGestureForMedia())
937             return false;
938         removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
939     }
940
941     if (pageConsentRequiredForAudioStart()) {
942         Page* page = document()->page();
943         if (page && !page->canStartMedia()) {
944             document()->addMediaCanStartListener(this);
945             return false;
946         }
947         removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
948     }
949
950     return m_mediaSession->clientWillBeginPlayback();
951 }
952
953 bool AudioContext::willPausePlayback()
954 {
955     if (userGestureRequiredForAudioStart()) {
956         if (!ScriptController::processingUserGestureForMedia())
957             return false;
958         removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
959     }
960
961     if (pageConsentRequiredForAudioStart()) {
962         Page* page = document()->page();
963         if (page && !page->canStartMedia()) {
964             document()->addMediaCanStartListener(this);
965             return false;
966         }
967         removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
968     }
969     
970     return m_mediaSession->clientWillPausePlayback();
971 }
972
973 void AudioContext::startRendering()
974 {
975     if (!willBeginPlayback())
976         return;
977
978     destination()->startRendering();
979     setState(State::Running);
980 }
981
// MediaCanStartListener callback: the page now permits media to start, so
// clear the page-consent restriction and attempt to resume playback.
void AudioContext::mediaCanStart(Document& document)
{
    ASSERT_UNUSED(document, &document == this->document());
    removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
    mayResumePlayback(true);
}
988
989 MediaProducer::MediaStateFlags AudioContext::mediaState() const
990 {
991     if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
992         return MediaProducer::IsPlayingAudio;
993
994     return MediaProducer::IsNotPlaying;
995 }
996
997 void AudioContext::pageMutedStateDidChange()
998 {
999     if (m_destinationNode && document()->page())
1000         m_destinationNode->setMuted(document()->page()->isAudioMuted());
1001 }
1002
1003 void AudioContext::isPlayingAudioDidChange()
1004 {
1005     // Make sure to call Document::updateIsPlayingMedia() on the main thread, since
1006     // we could be on the audio I/O thread here and the call into WebCore could block.
1007     callOnMainThread([protectedThis = makeRef(*this)] {
1008         if (protectedThis->document())
1009             protectedThis->document()->updateIsPlayingMedia();
1010     });
1011 }
1012
1013 void AudioContext::fireCompletionEvent()
1014 {
1015     ASSERT(isMainThread());
1016     if (!isMainThread())
1017         return;
1018         
1019     AudioBuffer* renderedBuffer = m_renderTarget.get();
1020     setState(State::Closed);
1021
1022     ASSERT(renderedBuffer);
1023     if (!renderedBuffer)
1024         return;
1025
1026     // Avoid firing the event if the document has already gone away.
1027     if (scriptExecutionContext()) {
1028         // Call the offline rendering completion event listener.
1029         m_eventQueue->enqueueEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
1030     }
1031 }
1032
// Bumps the count of currently-active source nodes.
void AudioContext::incrementActiveSourceCount()
{
    ++m_activeSourceCount;
}
1037
// Drops the count of currently-active source nodes.
void AudioContext::decrementActiveSourceCount()
{
    --m_activeSourceCount;
}
1042
// Web Audio API: AudioContext.suspend(). The promise is settled once the
// context actually reaches the Suspended state.
void AudioContext::suspend(DOMPromiseDeferred<void>&& promise)
{
    // Offline contexts cannot be suspended.
    if (isOfflineContext()) {
        promise.reject(INVALID_STATE_ERR);
        return;
    }

    // Already suspended: resolve immediately.
    if (m_state == State::Suspended) {
        promise.resolve();
        return;
    }

    if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
        promise.reject();
        return;
    }

    // Queue the promise to be settled when the state becomes Suspended; this
    // must happen before the willPausePlayback() early-out below so a veto
    // still leaves the promise pending rather than dropped.
    addReaction(State::Suspended, WTFMove(promise));

    // The media session (or a restriction) may prevent pausing right now; the
    // queued reaction settles whenever the state eventually changes.
    if (!willPausePlayback())
        return;

    lazyInitialize();

    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
        setState(State::Suspended);
    });
}
1071
// Web Audio API: AudioContext.resume(). The promise is settled once the
// context actually returns to the Running state.
void AudioContext::resume(DOMPromiseDeferred<void>&& promise)
{
    // Offline contexts cannot be resumed.
    if (isOfflineContext()) {
        promise.reject(INVALID_STATE_ERR);
        return;
    }

    // Already running: resolve immediately.
    if (m_state == State::Running) {
        promise.resolve();
        return;
    }

    if (m_state == State::Closed || !m_destinationNode) {
        promise.reject();
        return;
    }

    // Queue the promise to be settled when the state becomes Running; this
    // must happen before the willBeginPlayback() early-out below so a veto
    // still leaves the promise pending rather than dropped.
    addReaction(State::Running, WTFMove(promise));

    // Restrictions or the media session may prevent playback from starting
    // now; the queued reaction settles whenever the state eventually changes.
    if (!willBeginPlayback())
        return;

    lazyInitialize();

    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
        setState(State::Running);
    });
}
1100
// Web Audio API: AudioContext.close(). The promise is settled once the
// context reaches the Closed state and has been uninitialized.
void AudioContext::close(DOMPromiseDeferred<void>&& promise)
{
    // Offline contexts cannot be closed.
    if (isOfflineContext()) {
        promise.reject(INVALID_STATE_ERR);
        return;
    }

    // Closing an already-closed (or destination-less) context trivially succeeds.
    if (m_state == State::Closed || !m_destinationNode) {
        promise.resolve();
        return;
    }

    // Queue the promise to be settled when the state becomes Closed.
    addReaction(State::Closed, WTFMove(promise));

    lazyInitialize();

    m_destinationNode->close([this, protectedThis = makeRef(*this)] {
        setState(State::Closed);
        uninitialize();
    });
}
1122
1123
// Internally-driven suspension (as opposed to the page-visible suspend() API);
// consults the platform media session to distinguish an interruption from a
// plain suspension.
void AudioContext::suspendPlayback()
{
    if (!m_destinationNode || m_state == State::Closed)
        return;

    if (m_state == State::Suspended) {
        // Already suspended; just record an in-progress interruption, if any.
        if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
            setState(State::Interrupted);
        return;
    }

    lazyInitialize();

    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
        // Re-check the media session once the destination has actually
        // suspended: an interruption may have begun in the meantime.
        bool interrupted = m_mediaSession->state() == PlatformMediaSession::Interrupted;
        setState(interrupted ? State::Interrupted : State::Suspended);
    });
}
1142
// Attempts to resume a suspended/interrupted context; called with
// shouldResume == true from mediaCanStart() once page consent arrives.
// shouldResume == false settles the context into plain Suspended.
void AudioContext::mayResumePlayback(bool shouldResume)
{
    if (!m_destinationNode || m_state == State::Closed || m_state == State::Running)
        return;

    if (!shouldResume) {
        setState(State::Suspended);
        return;
    }

    // Restrictions or the media session may still veto resumption.
    if (!willBeginPlayback())
        return;

    lazyInitialize();

    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
        setState(State::Running);
    });
}
1162
1163
1164 } // namespace WebCore
1165
1166 #endif // ENABLE(WEB_AUDIO)