Next batch of conversions to use C++ enum class instead of strings for enumerations
[WebKit-https.git] / Source / WebCore / Modules / webaudio / AudioContext.cpp
1 /*
2  * Copyright (C) 2010 Google Inc. All rights reserved.
3  * Copyright (C) 2016 Apple Inc. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1.  Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2.  Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
15  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17  * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
18  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25
26 #include "config.h"
27
28 #if ENABLE(WEB_AUDIO)
29
30 #include "AudioContext.h"
31
32 #include "AnalyserNode.h"
33 #include "AsyncAudioDecoder.h"
34 #include "AudioBuffer.h"
35 #include "AudioBufferCallback.h"
36 #include "AudioBufferSourceNode.h"
37 #include "AudioListener.h"
38 #include "AudioNodeInput.h"
39 #include "AudioNodeOutput.h"
40 #include "BiquadFilterNode.h"
41 #include "ChannelMergerNode.h"
42 #include "ChannelSplitterNode.h"
43 #include "ConvolverNode.h"
44 #include "DefaultAudioDestinationNode.h"
45 #include "DelayNode.h"
46 #include "Document.h"
47 #include "DynamicsCompressorNode.h"
48 #include "EventNames.h"
49 #include "ExceptionCode.h"
50 #include "FFTFrame.h"
51 #include "GainNode.h"
52 #include "GenericEventQueue.h"
53 #include "HRTFDatabaseLoader.h"
54 #include "HRTFPanner.h"
55 #include "JSDOMPromise.h"
56 #include "OfflineAudioCompletionEvent.h"
57 #include "OfflineAudioDestinationNode.h"
58 #include "OscillatorNode.h"
59 #include "Page.h"
60 #include "PannerNode.h"
61 #include "PeriodicWave.h"
62 #include "ScriptController.h"
63 #include "ScriptProcessorNode.h"
64 #include "WaveShaperNode.h"
65 #include <inspector/ScriptCallStack.h>
66 #include <wtf/NeverDestroyed.h>
67
68 #if ENABLE(MEDIA_STREAM)
69 #include "MediaStream.h"
70 #include "MediaStreamAudioDestinationNode.h"
71 #include "MediaStreamAudioSource.h"
72 #include "MediaStreamAudioSourceNode.h"
73 #endif
74
75 #if ENABLE(VIDEO)
76 #include "HTMLMediaElement.h"
77 #include "MediaElementAudioSourceNode.h"
78 #endif
79
80 #if DEBUG_AUDIONODE_REFERENCES
81 #include <stdio.h>
82 #endif
83
84 #if USE(GSTREAMER)
85 #include "GStreamerUtilities.h"
86 #endif
87
88 #if PLATFORM(IOS)
89 #include "ScriptController.h"
90 #include "Settings.h"
91 #endif
92
93 #include <runtime/ArrayBuffer.h>
94 #include <wtf/Atomics.h>
95 #include <wtf/MainThread.h>
96 #include <wtf/Ref.h>
97 #include <wtf/RefCounted.h>
98 #include <wtf/text/WTFString.h>
99
// FIXME: check the proper way to reference an undefined thread ID
// Sentinel stored in m_graphOwnerThread whenever no thread currently owns the graph lock.
const int UndefinedThreadIdentifier = 0xffffffff;

// Upper bound on the coefficient-array length accepted by createPeriodicWave().
const unsigned MaxPeriodicWaveLength = 4096;
104
105 namespace WebCore {
106     
107 bool AudioContext::isSampleRateRangeGood(float sampleRate)
108 {
109     // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
110     // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
111     return sampleRate >= 44100 && sampleRate <= 96000;
112 }
113
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
const unsigned MaxHardwareContexts = 4;
// Number of realtime contexts currently initialized; incremented in lazyInitialize()
// and decremented in uninitialize().
unsigned AudioContext::s_hardwareContextCount = 0;
117     
118 RefPtr<AudioContext> AudioContext::create(Document& document, ExceptionCode& ec)
119 {
120     UNUSED_PARAM(ec);
121
122     ASSERT(isMainThread());
123     if (s_hardwareContextCount >= MaxHardwareContexts)
124         return nullptr;
125
126     RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
127     audioContext->suspendIfNeeded();
128     return audioContext;
129 }
130
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document& document)
    : ActiveDOMObject(&document)
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
    , m_graphOwnerThread(UndefinedThreadIdentifier)
{
    // Shared setup (pending activity, FFT tables, listener, restrictions) must run
    // before the destination node is created.
    constructCommon();

    m_destinationNode = DefaultAudioDestinationNode::create(*this);

    // Initialize the destination node's muted state to match the page's current muted state.
    pageMutedStateDidChange();
}
145
// Constructor for offline (non-realtime) rendering.
AudioContext::AudioContext(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    : ActiveDOMObject(&document)
    , m_isOfflineContext(true)
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
    , m_graphOwnerThread(UndefinedThreadIdentifier)
{
    constructCommon();

    // Create a new destination for offline rendering.
    // The render target buffer receives the rendered audio instead of the hardware.
    m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    m_destinationNode = OfflineAudioDestinationNode::create(*this, m_renderTarget.get());
}
160
// Setup shared by the realtime and offline constructors.
void AudioContext::constructCommon()
{
    // According to spec AudioContext must die only after page navigate.
    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
    setPendingActivity(this);

#if USE(GSTREAMER)
    initializeGStreamer();
#endif

    // Pre-build FFT lookup tables before any node can render.
    FFTFrame::initialize();

    m_listener = AudioListener::create();

#if PLATFORM(IOS)
    // On iOS, audio may not start without a user gesture unless settings say otherwise.
    if (!document()->settings() || document()->settings()->audioPlaybackRequiresUserGesture())
        addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
    else
        m_restrictions = NoRestrictions;
#endif

#if PLATFORM(COCOA)
    addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
#endif

    m_mediaSession->setCanProduceAudio(true);
}
188
// Destructor only sanity-checks that uninitialize()/clear() already ran;
// all real teardown happens there.
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    ASSERT(!m_isInitialized);
    ASSERT(m_isStopScheduled);
    ASSERT(m_nodesToDelete.isEmpty());
    ASSERT(m_referencedNodes.isEmpty());
    ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion fails on tests sometimes.
    ASSERT(m_automaticPullNodes.isEmpty());
    // If an update was still pending, mirror what updateAutomaticPullNodes() would
    // have done so the following assertion checks the up-to-date size.
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(m_renderingAutomaticPullNodes.isEmpty());
    // FIXME: Can we assert that m_deferredFinishDerefList is empty?
}
205
// Deferred one-time initialization; called from the node factories so the audio
// thread only starts once the context is actually used.
void AudioContext::lazyInitialize()
{
    if (m_isInitialized)
        return;

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    ASSERT(!m_isAudioThreadFinished);
    if (m_isAudioThreadFinished)
        return;

    if (m_destinationNode) {
        m_destinationNode->initialize();

        if (!isOfflineContext()) {
            // Realtime contexts report audio production to the document.
            document()->addAudioProducer(this);

            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
            // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
            // We may want to consider requiring it for symmetry with OfflineAudioContext.
            startRendering();
            ++s_hardwareContextCount;
        }
    }
    m_isInitialized = true;
}
232
// Final teardown after uninitialize(): drops the destination node, drains all
// pending node deletions, and releases the pending activity taken in constructCommon().
void AudioContext::clear()
{
    // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
    if (m_destinationNode)
        m_destinationNode = nullptr;

    // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
    // Loop because deleting nodes may mark further nodes for deletion.
    do {
        deleteMarkedNodes();
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();
    } while (m_nodesToDelete.size());

    // It was set in constructCommon.
    unsetPendingActivity(this);
}
249
// Stops rendering and tears down audio-thread state. Idempotent; main thread only.
void AudioContext::uninitialize()
{
    ASSERT(isMainThread());

    if (!m_isInitialized)
        return;

    // This stops the audio thread and all audio rendering.
    m_destinationNode->uninitialize();

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    m_isAudioThreadFinished = true;

    if (!isOfflineContext()) {
        document()->removeAudioProducer(this);

        // Balance the increment done in lazyInitialize().
        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;

        // Offline contexts move to 'Closed' state when dispatching the completion event.
        setState(State::Closed);
    }

    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();

    m_isInitialized = false;
}
278
279 bool AudioContext::isInitialized() const
280 {
281     return m_isInitialized;
282 }
283
284 void AudioContext::addReaction(State state, Promise&& promise)
285 {
286     size_t stateIndex = static_cast<size_t>(state);
287     if (stateIndex >= m_stateReactions.size())
288         m_stateReactions.resize(stateIndex + 1);
289
290     m_stateReactions[stateIndex].append(WTFMove(promise));
291 }
292
293 void AudioContext::setState(State state)
294 {
295     if (m_state == state)
296         return;
297
298     m_state = state;
299     m_eventQueue->enqueueEvent(Event::create(eventNames().statechangeEvent, true, false));
300
301     size_t stateIndex = static_cast<size_t>(state);
302     if (stateIndex >= m_stateReactions.size())
303         return;
304
305     Vector<Promise> reactions;
306     m_stateReactions[stateIndex].swap(reactions);
307
308     for (auto& promise : reactions)
309         promise.resolve(nullptr);
310 }
311
// ActiveDOMObject stop: schedules teardown rather than doing it inline.
void AudioContext::stop()
{
    ASSERT(isMainThread());

    // Usually ScriptExecutionContext calls stop twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;

    document()->updateIsPlayingMedia();

    m_eventQueue->close();

    // Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle
    // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
    // ActiveDOMObjects so let's schedule uninitialize() to be called later.
    // FIXME: see if there's a more direct way to handle this issue.
    // FIXME: This sounds very wrong. The whole idea of stop() is that it stops everything, and if we
    // schedule some observable work for later, the work likely happens at an inappropriate time.
    callOnMainThread([this] {
        uninitialize();
        clear();
    });
}
336
337 bool AudioContext::canSuspendForDocumentSuspension() const
338 {
339     // FIXME: We should be able to suspend while rendering as well with some more code.
340     return m_state == State::Suspended || m_state == State::Closed;
341 }
342
343 const char* AudioContext::activeDOMObjectName() const
344 {
345     return "AudioContext";
346 }
347
348 Document* AudioContext::document() const
349 {
350     ASSERT(m_scriptExecutionContext);
351     return downcast<Document>(m_scriptExecutionContext);
352 }
353
354 const Document* AudioContext::hostingDocument() const
355 {
356     return downcast<Document>(m_scriptExecutionContext);
357 }
358
359 RefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
360 {
361     RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
362     if (!audioBuffer) {
363         ec = NOT_SUPPORTED_ERR;
364         return nullptr;
365     }
366
367     return audioBuffer;
368 }
369
370 RefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer& arrayBuffer, bool mixToMono, ExceptionCode& ec)
371 {
372     RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer.data(), arrayBuffer.byteLength(), mixToMono, sampleRate());
373     if (!audioBuffer) {
374         ec = SYNTAX_ERR;
375         return nullptr;
376     }
377
378     return audioBuffer;
379 }
380
381 void AudioContext::decodeAudioData(Ref<ArrayBuffer>&& audioData, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback)
382 {
383     m_audioDecoder.decodeAsync(WTFMove(audioData), sampleRate(), WTFMove(successCallback), WTFMove(errorCallback));
384 }
385
386 Ref<AudioBufferSourceNode> AudioContext::createBufferSource()
387 {
388     ASSERT(isMainThread());
389     lazyInitialize();
390     Ref<AudioBufferSourceNode> node = AudioBufferSourceNode::create(*this, m_destinationNode->sampleRate());
391
392     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
393     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
394     refNode(node);
395
396     return node;
397 }
398
399 #if ENABLE(VIDEO)
400 RefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement& mediaElement, ExceptionCode& ec)
401 {
402     ASSERT(isMainThread());
403     lazyInitialize();
404     
405     // First check if this media element already has a source node.
406     if (mediaElement.audioSourceNode()) {
407         ec = INVALID_STATE_ERR;
408         return nullptr;
409     }
410         
411     Ref<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(*this, mediaElement);
412
413     mediaElement.setAudioSourceNode(node.ptr());
414
415     refNode(node.get()); // context keeps reference until node is disconnected
416     return WTFMove(node);
417 }
418 #endif
419
420 #if ENABLE(MEDIA_STREAM)
421 RefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream& mediaStream, ExceptionCode& ec)
422 {
423     ASSERT(isMainThread());
424
425     auto audioTracks = mediaStream.getAudioTracks();
426     if (audioTracks.isEmpty()) {
427         ec = INVALID_STATE_ERR;
428         return nullptr;
429     }
430
431     MediaStreamTrack* providerTrack = nullptr;
432     for (auto& track : audioTracks) {
433         if (track->audioSourceProvider()) {
434             providerTrack = track.get();
435             break;
436         }
437     }
438
439     if (!providerTrack) {
440         ec = INVALID_STATE_ERR;
441         return nullptr;
442     }
443
444     lazyInitialize();
445
446     auto node = MediaStreamAudioSourceNode::create(*this, mediaStream, *providerTrack);
447     node->setFormat(2, sampleRate());
448
449     refNode(node); // context keeps reference until node is disconnected
450     return WTFMove(node);
451 }
452
453 Ref<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
454 {
455     // FIXME: Add support for an optional argument which specifies the number of channels.
456     // FIXME: The default should probably be stereo instead of mono.
457     return MediaStreamAudioDestinationNode::create(*this, 1);
458 }
459
460 #endif
461
462 RefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec)
463 {
464     ASSERT(isMainThread());
465     lazyInitialize();
466     RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(*this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
467
468     if (!node) {
469         ec = INDEX_SIZE_ERR;
470         return nullptr;
471     }
472
473     refNode(*node); // context keeps reference until we stop making javascript rendering callbacks
474     return node;
475 }
476
477 Ref<BiquadFilterNode> AudioContext::createBiquadFilter()
478 {
479     ASSERT(isMainThread());
480     lazyInitialize();
481     return BiquadFilterNode::create(*this, m_destinationNode->sampleRate());
482 }
483
484 Ref<WaveShaperNode> AudioContext::createWaveShaper()
485 {
486     ASSERT(isMainThread());
487     lazyInitialize();
488     return WaveShaperNode::create(*this);
489 }
490
491 Ref<PannerNode> AudioContext::createPanner()
492 {
493     ASSERT(isMainThread());
494     lazyInitialize();
495     return PannerNode::create(*this, m_destinationNode->sampleRate());
496 }
497
498 Ref<ConvolverNode> AudioContext::createConvolver()
499 {
500     ASSERT(isMainThread());
501     lazyInitialize();
502     return ConvolverNode::create(*this, m_destinationNode->sampleRate());
503 }
504
505 Ref<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
506 {
507     ASSERT(isMainThread());
508     lazyInitialize();
509     return DynamicsCompressorNode::create(*this, m_destinationNode->sampleRate());
510 }
511
512 Ref<AnalyserNode> AudioContext::createAnalyser()
513 {
514     ASSERT(isMainThread());
515     lazyInitialize();
516     return AnalyserNode::create(*this, m_destinationNode->sampleRate());
517 }
518
519 Ref<GainNode> AudioContext::createGain()
520 {
521     ASSERT(isMainThread());
522     lazyInitialize();
523     return GainNode::create(*this, m_destinationNode->sampleRate());
524 }
525
526 RefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionCode& ec)
527 {
528     ASSERT(isMainThread());
529     lazyInitialize();
530     Ref<DelayNode> node = DelayNode::create(*this, m_destinationNode->sampleRate(), maxDelayTime, ec);
531     if (ec)
532         return nullptr;
533     return WTFMove(node);
534 }
535
536 RefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionCode& ec)
537 {
538     ASSERT(isMainThread());
539     lazyInitialize();
540
541     RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(*this, m_destinationNode->sampleRate(), numberOfOutputs);
542
543     if (!node) {
544         ec = INDEX_SIZE_ERR;
545         return nullptr;
546     }
547
548     return node;
549 }
550
551 RefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionCode& ec)
552 {
553     ASSERT(isMainThread());
554     lazyInitialize();
555
556     RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(*this, m_destinationNode->sampleRate(), numberOfInputs);
557
558     if (!node) {
559         ec = INDEX_SIZE_ERR;
560         return nullptr;
561     }
562
563     return node;
564 }
565
566 Ref<OscillatorNode> AudioContext::createOscillator()
567 {
568     ASSERT(isMainThread());
569     lazyInitialize();
570
571     Ref<OscillatorNode> node = OscillatorNode::create(*this, m_destinationNode->sampleRate());
572
573     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
574     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
575     refNode(node);
576
577     return node;
578 }
579
580 RefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionCode& ec)
581 {
582     ASSERT(isMainThread());
583     
584     if (!real || !imag || (real->length() != imag->length() || (real->length() > MaxPeriodicWaveLength) || (real->length() <= 0))) {
585         ec = INDEX_SIZE_ERR;
586         return nullptr;
587     }
588     
589     lazyInitialize();
590     return PeriodicWave::create(sampleRate(), real, imag);
591 }
592
593 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
594 {
595     ASSERT(isAudioThread());
596     m_finishedNodes.append(node);
597 }
598
599 void AudioContext::derefFinishedSourceNodes()
600 {
601     ASSERT(isGraphOwner());
602     ASSERT(isAudioThread() || isAudioThreadFinished());
603     for (auto& node : m_finishedNodes)
604         derefNode(*node);
605
606     m_finishedNodes.clear();
607 }
608
609 void AudioContext::refNode(AudioNode& node)
610 {
611     ASSERT(isMainThread());
612     AutoLocker locker(*this);
613     
614     node.ref(AudioNode::RefTypeConnection);
615     m_referencedNodes.append(&node);
616 }
617
618 void AudioContext::derefNode(AudioNode& node)
619 {
620     ASSERT(isGraphOwner());
621     
622     node.deref(AudioNode::RefTypeConnection);
623
624     ASSERT(m_referencedNodes.contains(&node));
625     m_referencedNodes.removeFirst(&node);
626 }
627
628 void AudioContext::derefUnfinishedSourceNodes()
629 {
630     ASSERT(isMainThread() && isAudioThreadFinished());
631     for (auto& node : m_referencedNodes)
632         node->deref(AudioNode::RefTypeConnection);
633
634     m_referencedNodes.clear();
635 }
636
// Acquire the graph mutex from the main thread. The lock is recursive in effect:
// if this thread already owns it, mustReleaseLock is set false and nothing is taken.
void AudioContext::lock(bool& mustReleaseLock)
{
    // Don't allow regular lock in real-time audio thread.
    ASSERT(isMainThread());

    ThreadIdentifier thisThread = currentThread();

    if (thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = thisThread;
        mustReleaseLock = true;
    }
}

// Non-blocking acquire used by the real-time audio thread. Returns true when the
// graph lock is held on return (either freshly taken or already owned).
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();

    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread || isAudioThreadFinished());

    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();

        if (hasLock)
            m_graphOwnerThread = thisThread;

        mustReleaseLock = hasLock;
    }

    return hasLock;
}

// Release the graph lock. Ownership is cleared before unlocking so no other
// thread can observe a stale owner while holding the mutex.
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);

    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}
695
696 bool AudioContext::isAudioThread() const
697 {
698     return currentThread() == m_audioThread;
699 }
700
701 bool AudioContext::isGraphOwner() const
702 {
703     return currentThread() == m_graphOwnerThread;
704 }
705
706 void AudioContext::addDeferredFinishDeref(AudioNode* node)
707 {
708     ASSERT(isAudioThread());
709     m_deferredFinishDerefList.append(node);
710 }
711
// Audio-thread housekeeping run before each render quantum.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}

// Audio-thread housekeeping run after each render quantum.
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
760
761 void AudioContext::handleDeferredFinishDerefs()
762 {
763     ASSERT(isAudioThread() && isGraphOwner());
764     for (auto& node : m_deferredFinishDerefList)
765         node->finishDeref(AudioNode::RefTypeConnection);
766     
767     m_deferredFinishDerefList.clear();
768 }
769
770 void AudioContext::markForDeletion(AudioNode* node)
771 {
772     ASSERT(isGraphOwner());
773
774     if (isAudioThreadFinished())
775         m_nodesToDelete.append(node);
776     else
777         m_nodesMarkedForDeletion.append(node);
778
779     // This is probably the best time for us to remove the node from automatic pull list,
780     // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
781     // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
782     // modify m_renderingAutomaticPullNodes.
783     removeAutomaticPullNode(node);
784 }
785
// Hand marked nodes to the main thread for actual deletion; called on the
// audio thread while holding the graph lock.
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = m_isInitialized && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;

    // Make sure to call deleteMarkedNodes() on main thread.
    // m_isDeletionScheduled guards against queueing more than one main-thread task.
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();

        m_isDeletionScheduled = true;

        // Keep the context alive until the main-thread task has run.
        RefPtr<AudioContext> strongThis(this);
        callOnMainThread([strongThis] {
            strongThis->deleteMarkedNodes();
        });
    }
}
806
// Main-thread deletion of nodes queued by scheduleNodeDeletion()/clear().
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());

    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    Ref<AudioContext> protect(*this);
    {
        AutoLocker locker(*this);

        while (m_nodesToDelete.size()) {
            AudioNode* node = m_nodesToDelete.takeLast();

            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));

            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));

            // Finally, delete it.
            delete node;
        }
        // Allow scheduleNodeDeletion() to queue another main-thread pass.
        m_isDeletionScheduled = false;
    }
}
835
836 void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
837 {
838     ASSERT(isGraphOwner());    
839     m_dirtySummingJunctions.add(summingJunction);
840 }
841
842 void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
843 {
844     ASSERT(isMainThread());
845     AutoLocker locker(*this);
846     m_dirtySummingJunctions.remove(summingJunction);
847 }
848
849 void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
850 {
851     ASSERT(isGraphOwner());    
852     m_dirtyAudioNodeOutputs.add(output);
853 }
854
855 void AudioContext::handleDirtyAudioSummingJunctions()
856 {
857     ASSERT(isGraphOwner());    
858
859     for (auto& junction : m_dirtySummingJunctions)
860         junction->updateRenderingState();
861
862     m_dirtySummingJunctions.clear();
863 }
864
865 void AudioContext::handleDirtyAudioNodeOutputs()
866 {
867     ASSERT(isGraphOwner());    
868
869     for (auto& output : m_dirtyAudioNodeOutputs)
870         output->updateRenderingState();
871
872     m_dirtyAudioNodeOutputs.clear();
873 }
874
875 void AudioContext::addAutomaticPullNode(AudioNode* node)
876 {
877     ASSERT(isGraphOwner());
878
879     if (m_automaticPullNodes.add(node).isNewEntry)
880         m_automaticPullNodesNeedUpdating = true;
881 }
882
883 void AudioContext::removeAutomaticPullNode(AudioNode* node)
884 {
885     ASSERT(isGraphOwner());
886
887     if (m_automaticPullNodes.remove(node))
888         m_automaticPullNodesNeedUpdating = true;
889 }
890
891 void AudioContext::updateAutomaticPullNodes()
892 {
893     ASSERT(isGraphOwner());
894
895     if (m_automaticPullNodesNeedUpdating) {
896         // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
897         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
898
899         unsigned i = 0;
900         for (auto& output : m_automaticPullNodes)
901             m_renderingAutomaticPullNodes[i++] = output;
902
903         m_automaticPullNodesNeedUpdating = false;
904     }
905 }
906
907 void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
908 {
909     ASSERT(isAudioThread());
910
911     for (auto& node : m_renderingAutomaticPullNodes)
912         node->processIfNecessary(framesToProcess);
913 }
914
915 ScriptExecutionContext* AudioContext::scriptExecutionContext() const
916 {
917     return m_isStopScheduled ? 0 : ActiveDOMObject::scriptExecutionContext();
918 }
919
void AudioContext::nodeWillBeginPlayback()
{
    // Called by scheduled AudioNodes when clients schedule their start times.
    // Prior to the introduction of suspend(), resume(), and stop(), starting
    // a scheduled AudioNode would remove the user-gesture restriction, if present,
    // and would thus unmute the context. Now that AudioContext stays in the
    // "suspended" state if a user-gesture restriction is present, starting a
    // schedule AudioNode should set the state to "running", but only if the
    // user-gesture restriction is set.
    if (userGestureRequiredForAudioStart())
        startRendering();
}
932
933 bool AudioContext::willBeginPlayback()
934 {
935     if (userGestureRequiredForAudioStart()) {
936         if (!ScriptController::processingUserGestureForMedia())
937             return false;
938         removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
939     }
940
941     if (pageConsentRequiredForAudioStart()) {
942         Page* page = document()->page();
943         if (page && !page->canStartMedia()) {
944             document()->addMediaCanStartListener(this);
945             return false;
946         }
947         removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
948     }
949
950     return m_mediaSession->clientWillBeginPlayback();
951 }
952
953 bool AudioContext::willPausePlayback()
954 {
955     if (userGestureRequiredForAudioStart()) {
956         if (!ScriptController::processingUserGestureForMedia())
957             return false;
958         removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
959     }
960
961     if (pageConsentRequiredForAudioStart()) {
962         Page* page = document()->page();
963         if (page && !page->canStartMedia()) {
964             document()->addMediaCanStartListener(this);
965             return false;
966         }
967         removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
968     }
969     
970     return m_mediaSession->clientWillPausePlayback();
971 }
972
973 void AudioContext::startRendering()
974 {
975     if (!willBeginPlayback())
976         return;
977
978     destination()->startRendering();
979     setState(State::Running);
980 }
981
void AudioContext::mediaCanStart()
{
    // MediaCanStartListener callback: the page now permits media to start, so
    // lift the page-consent restriction registered in will{Begin,Pause}Playback().
    removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
}
986
987 MediaProducer::MediaStateFlags AudioContext::mediaState() const
988 {
989     if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
990         return MediaProducer::IsPlayingAudio;
991
992     return MediaProducer::IsNotPlaying;
993 }
994
995 void AudioContext::pageMutedStateDidChange()
996 {
997     if (m_destinationNode && document()->page())
998         m_destinationNode->setMuted(document()->page()->isMuted());
999 }
1000
1001 void AudioContext::isPlayingAudioDidChange()
1002 {
1003     // Make sure to call Document::updateIsPlayingMedia() on the main thread, since
1004     // we could be on the audio I/O thread here and the call into WebCore could block.
1005     RefPtr<AudioContext> strongThis(this);
1006     callOnMainThread([strongThis] {
1007         if (strongThis->document())
1008             strongThis->document()->updateIsPlayingMedia();
1009     });
1010 }
1011
1012 void AudioContext::fireCompletionEvent()
1013 {
1014     ASSERT(isMainThread());
1015     if (!isMainThread())
1016         return;
1017         
1018     AudioBuffer* renderedBuffer = m_renderTarget.get();
1019     setState(State::Closed);
1020
1021     ASSERT(renderedBuffer);
1022     if (!renderedBuffer)
1023         return;
1024
1025     // Avoid firing the event if the document has already gone away.
1026     if (scriptExecutionContext()) {
1027         // Call the offline rendering completion event listener.
1028         m_eventQueue->enqueueEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
1029     }
1030 }
1031
void AudioContext::incrementActiveSourceCount()
{
    // Bumps the count of active source nodes.
    // NOTE(review): presumably m_activeSourceCount is atomic or confined to a
    // single thread, since sources can start from different threads — confirm.
    ++m_activeSourceCount;
}
1036
void AudioContext::decrementActiveSourceCount()
{
    // Counterpart to incrementActiveSourceCount(); called when a source node
    // finishes.
    --m_activeSourceCount;
}
1041
void AudioContext::suspend(Promise&& promise)
{
    // Suspending is only meaningful for realtime contexts.
    if (isOfflineContext()) {
        promise.reject(INVALID_STATE_ERR);
        return;
    }

    // Already suspended: succeed immediately.
    if (m_state == State::Suspended) {
        promise.resolve(nullptr);
        return;
    }

    if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
        promise.reject(0);
        return;
    }

    // Queue the promise before attempting the transition; it is presumably
    // settled when the context actually reaches State::Suspended — confirm
    // against addReaction()/setState().
    addReaction(State::Suspended, WTFMove(promise));

    // NOTE(review): if willPausePlayback() fails here, the queued reaction
    // stays pending until some later state change — confirm this is intended.
    if (!willPausePlayback())
        return;

    lazyInitialize();

    // Keep |this| alive until the destination's suspend completion runs.
    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->suspend([strongThis] {
        strongThis->setState(State::Suspended);
    });
}
1071
void AudioContext::resume(Promise&& promise)
{
    // Resuming is only meaningful for realtime contexts.
    if (isOfflineContext()) {
        promise.reject(INVALID_STATE_ERR);
        return;
    }

    // Already running: succeed immediately.
    if (m_state == State::Running) {
        promise.resolve(nullptr);
        return;
    }

    if (m_state == State::Closed || !m_destinationNode) {
        promise.reject(0);
        return;
    }

    // Queue the promise before attempting the transition; it is presumably
    // settled when the context actually reaches State::Running — confirm
    // against addReaction()/setState().
    addReaction(State::Running, WTFMove(promise));

    // NOTE(review): if willBeginPlayback() fails here, the queued reaction
    // stays pending until some later state change — confirm this is intended.
    if (!willBeginPlayback())
        return;

    lazyInitialize();

    // Keep |this| alive until the destination's resume completion runs.
    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->resume([strongThis] {
        strongThis->setState(State::Running);
    });
}
1101
void AudioContext::close(Promise&& promise)
{
    // Closing is only meaningful for realtime contexts.
    if (isOfflineContext()) {
        promise.reject(INVALID_STATE_ERR);
        return;
    }

    // Closing an already-closed (or destination-less) context succeeds
    // immediately.
    if (m_state == State::Closed || !m_destinationNode) {
        promise.resolve(nullptr);
        return;
    }

    // Queue the promise; it is presumably settled when the context reaches
    // State::Closed — confirm against addReaction()/setState().
    addReaction(State::Closed, WTFMove(promise));

    lazyInitialize();

    // Keep |this| alive until the destination's close completion runs, then
    // transition to Closed and tear down the context.
    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->close([strongThis] {
        strongThis->setState(State::Closed);
        strongThis->uninitialize();
    });
}
1124
1125
void AudioContext::suspendPlayback()
{
    // Internally driven suspension (e.g. from the media session), as opposed
    // to the script-visible suspend(Promise&&).
    if (!m_destinationNode || m_state == State::Closed)
        return;

    if (m_state == State::Suspended) {
        // Already suspended; just reflect an interruption in the state if the
        // media session reports one.
        if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
            setState(State::Interrupted);
        return;
    }

    lazyInitialize();

    // Keep |this| alive until the destination's suspend completion runs.
    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->suspend([strongThis] {
        // Re-check the media session: an interruption may have begun while
        // the destination was suspending.
        bool interrupted = strongThis->m_mediaSession->state() == PlatformMediaSession::Interrupted;
        strongThis->setState(interrupted ? State::Interrupted : State::Suspended);
    });
}
1145
void AudioContext::mayResumePlayback(bool shouldResume)
{
    // Called when a prior interruption/suspension may end. shouldResume
    // indicates whether playback should actually restart.
    if (!m_destinationNode || m_state == State::Closed || m_state == State::Running)
        return;

    if (!shouldResume) {
        // Interruption ended but playback should not restart; settle in the
        // plain Suspended state.
        setState(State::Suspended);
        return;
    }

    // Restart only if the behavior restrictions and media session permit it.
    if (!willBeginPlayback())
        return;

    lazyInitialize();

    // Keep |this| alive until the destination's resume completion runs.
    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->resume([strongThis] {
        strongThis->setState(State::Running);
    });
}
1166
1167
1168 } // namespace WebCore
1169
1170 #endif // ENABLE(WEB_AUDIO)