/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "AudioContext.h"

#include "AnalyserNode.h"
#include "AsyncAudioDecoder.h"
#include "AudioBuffer.h"
#include "AudioBufferCallback.h"
#include "AudioBufferSourceNode.h"
#include "AudioListener.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
#include "BiquadFilterNode.h"
#include "ChannelMergerNode.h"
#include "ChannelSplitterNode.h"
#include "ConvolverNode.h"
#include "DefaultAudioDestinationNode.h"
#include "DelayNode.h"
#include "Document.h"
#include "DynamicsCompressorNode.h"
#include "EventNames.h"
#include "ExceptionCode.h"
#include "FFTFrame.h"
#include "GainNode.h"
#include "GenericEventQueue.h"
#include "HRTFDatabaseLoader.h"
#include "HRTFPanner.h"
#include "JSDOMPromise.h"
#include "OfflineAudioCompletionEvent.h"
#include "OfflineAudioDestinationNode.h"
#include "OscillatorNode.h"
#include "Page.h"
#include "PannerNode.h"
#include "PeriodicWave.h"
#include "ScriptController.h"
#include "ScriptProcessorNode.h"
#include "WaveShaperNode.h"
#include <inspector/ScriptCallStack.h>
#include <wtf/NeverDestroyed.h>

#if ENABLE(MEDIA_STREAM)
#include "MediaStream.h"
#include "MediaStreamAudioDestinationNode.h"
#include "MediaStreamAudioSource.h"
#include "MediaStreamAudioSourceNode.h"
#endif

#if ENABLE(VIDEO)
#include "HTMLMediaElement.h"
#include "MediaElementAudioSourceNode.h"
#endif

#if DEBUG_AUDIONODE_REFERENCES
#include <stdio.h>
#endif

#if USE(GSTREAMER)
#include "GStreamerUtilities.h"
#endif

#if PLATFORM(IOS)
#include "ScriptController.h"
#include "Settings.h"
#endif

#include <runtime/ArrayBuffer.h>
#include <wtf/Atomics.h>
#include <wtf/MainThread.h>
#include <wtf/Ref.h>
#include <wtf/RefCounted.h>
#include <wtf/text/WTFString.h>

// FIXME: check the proper way to reference an undefined thread ID
const int UndefinedThreadIdentifier = 0xffffffff;

const unsigned MaxPeriodicWaveLength = 4096;

namespace WebCore {

bool AudioContext::isSampleRateRangeGood(float sampleRate)
{
    // FIXME: It would be nice if the minimum sample rate could be less than 44.1 kHz,
    // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
    return sampleRate >= 44100 && sampleRate <= 96000;
}

// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
const unsigned MaxHardwareContexts = 4;
unsigned AudioContext::s_hardwareContextCount = 0;

RefPtr<AudioContext> AudioContext::create(Document& document, ExceptionCode& ec)
{
    UNUSED_PARAM(ec);

    ASSERT(isMainThread());
    if (s_hardwareContextCount >= MaxHardwareContexts)
        return nullptr;

    RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
    audioContext->suspendIfNeeded();
    return audioContext;
}
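
// Illustrative JavaScript usage (a sketch, not part of this file): because of
// the MaxHardwareContexts (4) cap above, creating a realtime context can fail
// once that many contexts are already talking to the hardware, so callers
// should be prepared for construction to fail:
//
//     var context = new (window.AudioContext || window.webkitAudioContext)();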

// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document& document)
    : ActiveDOMObject(&document)
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
    , m_graphOwnerThread(UndefinedThreadIdentifier)
{
    constructCommon();

    m_destinationNode = DefaultAudioDestinationNode::create(this);

    // Initialize the destination node's muted state to match the page's current muted state.
    pageMutedStateDidChange();
}

// Constructor for offline (non-realtime) rendering.
AudioContext::AudioContext(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    : ActiveDOMObject(&document)
    , m_isOfflineContext(true)
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
    , m_graphOwnerThread(UndefinedThreadIdentifier)
{
    constructCommon();

    // Create a new destination for offline rendering.
    m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
}

void AudioContext::constructCommon()
{
    // According to the spec, an AudioContext must die only after the page navigates away.
    // Let's mark it as an ActiveDOMObject with pending activity and unmark it in the clear() method.
    setPendingActivity(this);

#if USE(GSTREAMER)
    initializeGStreamer();
#endif

    FFTFrame::initialize();

    m_listener = AudioListener::create();

#if PLATFORM(IOS)
    if (!document()->settings() || document()->settings()->requiresUserGestureForMediaPlayback())
        addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
    else
        m_restrictions = NoRestrictions;
#endif

#if PLATFORM(COCOA)
    addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
#endif

    m_mediaSession->setCanProduceAudio(true);
}

AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    ASSERT(!m_isInitialized);
    ASSERT(m_isStopScheduled);
    ASSERT(m_nodesToDelete.isEmpty());
    ASSERT(m_referencedNodes.isEmpty());
    ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion fails on tests sometimes.
    ASSERT(m_automaticPullNodes.isEmpty());
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(m_renderingAutomaticPullNodes.isEmpty());
    // FIXME: Can we assert that m_deferredFinishDerefList is empty?
}

void AudioContext::lazyInitialize()
{
    if (m_isInitialized)
        return;

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    ASSERT(!m_isAudioThreadFinished);
    if (m_isAudioThreadFinished)
        return;

    if (m_destinationNode.get()) {
        m_destinationNode->initialize();

        if (!isOfflineContext()) {
            document()->addAudioProducer(this);

            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
            // NOTE: for now, the default AudioContext does not need an explicit startRendering() call from JavaScript.
            // We may want to consider requiring it for symmetry with OfflineAudioContext.
            startRendering();
            ++s_hardwareContextCount;
        }
    }
    m_isInitialized = true;
}

void AudioContext::clear()
{
    // We have to release our reference to the destination node before the context will ever be deleted, since the destination node holds a reference to the context.
    if (m_destinationNode)
        m_destinationNode = nullptr;

    // The audio thread is dead, so nobody will schedule the node deletion action. Let's do it ourselves.
    do {
        deleteMarkedNodes();
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();
    } while (m_nodesToDelete.size());

    // This balances the setPendingActivity() call in constructCommon().
    unsetPendingActivity(this);
}

void AudioContext::uninitialize()
{
    ASSERT(isMainThread());

    if (!m_isInitialized)
        return;

    // This stops the audio thread and all audio rendering.
    m_destinationNode->uninitialize();

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    m_isAudioThreadFinished = true;

    if (!isOfflineContext()) {
        document()->removeAudioProducer(this);

        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;

        // Offline contexts move to the 'Closed' state when dispatching the completion event.
        setState(State::Closed);
    }

    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();

    m_isInitialized = false;
}

bool AudioContext::isInitialized() const
{
    return m_isInitialized;
}

void AudioContext::addReaction(State state, Promise&& promise)
{
    size_t stateIndex = static_cast<size_t>(state);
    if (stateIndex >= m_stateReactions.size())
        m_stateReactions.resize(stateIndex + 1);

    m_stateReactions[stateIndex].append(WTFMove(promise));
}

void AudioContext::setState(State state)
{
    if (m_state == state)
        return;

    m_state = state;
    m_eventQueue->enqueueEvent(Event::create(eventNames().statechangeEvent, true, false));

    size_t stateIndex = static_cast<size_t>(state);
    if (stateIndex >= m_stateReactions.size())
        return;

    Vector<Promise> reactions;
    m_stateReactions[stateIndex].swap(reactions);

    for (auto& promise : reactions)
        promise.resolve(nullptr);
}
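
// How the reaction queue above drives the promise-based API (an illustrative
// sketch; the real call sites are suspend(), resume(), and close() below):
//
//     addReaction(State::Suspended, WTFMove(promise)); // queue the promise
//     m_destinationNode->suspend([strongThis] {
//         strongThis->setState(State::Suspended);      // resolves every queued
//     });                                              // Suspended reaction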

const AtomicString& AudioContext::state() const
{
    static NeverDestroyed<AtomicString> suspended("suspended");
    static NeverDestroyed<AtomicString> running("running");
    static NeverDestroyed<AtomicString> interrupted("interrupted");
    static NeverDestroyed<AtomicString> closed("closed");

    switch (m_state) {
    case State::Suspended:
        return suspended;
    case State::Running:
        return running;
    case State::Interrupted:
        return interrupted;
    case State::Closed:
        return closed;
    }

    ASSERT_NOT_REACHED();
    return suspended;
}

void AudioContext::stop()
{
    ASSERT(isMainThread());

    // ScriptExecutionContext usually calls stop() twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;

    document()->updateIsPlayingMedia();

    m_eventQueue->close();

    // Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle
    // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
    // ActiveDOMObjects, so let's schedule uninitialize() to be called later.
    // FIXME: see if there's a more direct way to handle this issue.
    // FIXME: This sounds very wrong. The whole idea of stop() is that it stops everything, and if we
    // schedule some observable work for later, the work likely happens at an inappropriate time.
    callOnMainThread([this] {
        uninitialize();
        clear();
    });
}

bool AudioContext::canSuspendForDocumentSuspension() const
{
    // FIXME: We should be able to suspend while rendering as well with some more code.
    return m_state == State::Suspended || m_state == State::Closed;
}

const char* AudioContext::activeDOMObjectName() const
{
    return "AudioContext";
}

Document* AudioContext::document() const
{
    ASSERT(m_scriptExecutionContext);
    return downcast<Document>(m_scriptExecutionContext);
}

const Document* AudioContext::hostingDocument() const
{
    return downcast<Document>(m_scriptExecutionContext);
}

PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
{
    RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    if (!audioBuffer.get()) {
        ec = NOT_SUPPORTED_ERR;
        return nullptr;
    }

    return audioBuffer;
}

PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionCode& ec)
{
    ASSERT(arrayBuffer);
    if (!arrayBuffer) {
        ec = SYNTAX_ERR;
        return nullptr;
    }

    RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
    if (!audioBuffer.get()) {
        ec = SYNTAX_ERR;
        return nullptr;
    }

    return audioBuffer;
}

void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionCode& ec)
{
    if (!audioData) {
        ec = SYNTAX_ERR;
        return;
    }
    m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
}
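
// Illustrative JavaScript usage of the callback-style decoder above (a sketch):
//
//     context.decodeAudioData(arrayBuffer,
//         function(decodedBuffer) { /* e.g. assign it to a source node's buffer */ },
//         function() { /* decoding failed */ });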

PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());

    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    refNode(node.get());

    return node;
}

#if ENABLE(VIDEO)
PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionCode& ec)
{
    ASSERT(mediaElement);
    if (!mediaElement) {
        ec = INVALID_STATE_ERR;
        return nullptr;
    }

    ASSERT(isMainThread());
    lazyInitialize();

    // First check if this media element already has a source node.
    if (mediaElement->audioSourceNode()) {
        ec = INVALID_STATE_ERR;
        return nullptr;
    }

    RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);

    mediaElement->setAudioSourceNode(node.get());

    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
#endif

#if ENABLE(MEDIA_STREAM)
PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec)
{
    ASSERT(isMainThread());

    ASSERT(mediaStream);
    if (!mediaStream) {
        ec = INVALID_STATE_ERR;
        return nullptr;
    }

    auto audioTracks = mediaStream->getAudioTracks();
    if (audioTracks.isEmpty()) {
        ec = INVALID_STATE_ERR;
        return nullptr;
    }

    MediaStreamTrack* providerTrack = nullptr;
    for (auto& track : audioTracks) {
        if (track->audioSourceProvider()) {
            providerTrack = track.get();
            break;
        }
    }

    if (!providerTrack) {
        ec = INVALID_STATE_ERR;
        return nullptr;
    }

    lazyInitialize();

    auto node = MediaStreamAudioSourceNode::create(*this, *mediaStream, *providerTrack);
    node->setFormat(2, sampleRate());

    refNode(&node.get()); // context keeps reference until node is disconnected
    return &node.get();
}

PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
{
    // FIXME: Add support for an optional argument which specifies the number of channels.
    // FIXME: The default should probably be stereo instead of mono.
    return MediaStreamAudioDestinationNode::create(this, 1);
}

#endif

PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionCode& ec)
{
    // Set number of input/output channels to stereo by default.
    return createScriptProcessor(bufferSize, 2, 2, ec);
}

PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode& ec)
{
    // Set number of output channels to stereo by default.
    return createScriptProcessor(bufferSize, numberOfInputChannels, 2, ec);
}

PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec)
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);

    if (!node.get()) {
        ec = INDEX_SIZE_ERR;
        return nullptr;
    }

    refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
    return node;
}

PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return WaveShaperNode::create(this);
}

PassRefPtr<PannerNode> AudioContext::createPanner()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return PannerNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<ConvolverNode> AudioContext::createConvolver()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return ConvolverNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return AnalyserNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<GainNode> AudioContext::createGain()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return GainNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionCode& ec)
{
    const double defaultMaxDelayTime = 1;
    return createDelay(defaultMaxDelayTime, ec);
}

PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionCode& ec)
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, ec);
    if (ec)
        return nullptr;
    return node;
}

PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionCode& ec)
{
    const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
    return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, ec);
}

PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionCode& ec)
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);

    if (!node.get()) {
        ec = SYNTAX_ERR;
        return nullptr;
    }

    return node;
}

PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionCode& ec)
{
    const unsigned ChannelMergerDefaultNumberOfInputs = 6;
    return createChannelMerger(ChannelMergerDefaultNumberOfInputs, ec);
}

PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionCode& ec)
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);

    if (!node.get()) {
        ec = SYNTAX_ERR;
        return nullptr;
    }

    return node;
}

PassRefPtr<OscillatorNode> AudioContext::createOscillator()
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());

    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    refNode(node.get());

    return node;
}

PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionCode& ec)
{
    ASSERT(isMainThread());

    if (!real || !imag || real->length() != imag->length() || real->length() > MaxPeriodicWaveLength || !real->length()) {
        ec = SYNTAX_ERR;
        return nullptr;
    }

    lazyInitialize();
    return PeriodicWave::create(sampleRate(), real, imag);
}
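
// Illustrative JavaScript usage (a sketch): real and imag hold Fourier
// coefficients of matching nonzero length, at most MaxPeriodicWaveLength:
//
//     var real = new Float32Array([0, 1, 0.5]);
//     var imag = new Float32Array(real.length);
//     oscillator.setPeriodicWave(context.createPeriodicWave(real, imag));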

void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}

void AudioContext::derefFinishedSourceNodes()
{
    ASSERT(isGraphOwner());
    ASSERT(isAudioThread() || isAudioThreadFinished());
    for (auto& node : m_finishedNodes)
        derefNode(node);

    m_finishedNodes.clear();
}

void AudioContext::refNode(AudioNode* node)
{
    ASSERT(isMainThread());
    AutoLocker locker(*this);

    node->ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(node);
}

void AudioContext::derefNode(AudioNode* node)
{
    ASSERT(isGraphOwner());

    node->deref(AudioNode::RefTypeConnection);

    ASSERT(m_referencedNodes.contains(node));
    m_referencedNodes.removeFirst(node);
}

void AudioContext::derefUnfinishedSourceNodes()
{
    ASSERT(isMainThread() && isAudioThreadFinished());
    for (auto& node : m_referencedNodes)
        node->deref(AudioNode::RefTypeConnection);

    m_referencedNodes.clear();
}

void AudioContext::lock(bool& mustReleaseLock)
{
    // Don't allow a regular lock on the real-time audio thread.
    ASSERT(isMainThread());

    ThreadIdentifier thisThread = currentThread();

    if (thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = thisThread;
        mustReleaseLock = true;
    }
}

bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();

    // Try to catch cases of using tryLock() on the main thread - it should use the regular lock() instead.
    ASSERT(isAudioThread || isAudioThreadFinished());

    if (!isAudioThread) {
        // In a release build, treat tryLock() as lock() (since the ASSERT above compiles away) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (thisThread == m_graphOwnerThread) {
        // This thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // We don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();

        if (hasLock)
            m_graphOwnerThread = thisThread;

        mustReleaseLock = hasLock;
    }

    return hasLock;
}

void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);

    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}
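
// The locking discipline in this file, as an illustrative sketch: the main
// thread may block on the graph lock (typically via AutoLocker), while the
// audio thread only ever tries it so a render quantum is never stalled:
//
//     // Main thread:
//     AutoLocker locker(*this); // lock(); released when the scope exits
//
//     // Audio thread, once per render quantum:
//     bool mustReleaseLock;
//     if (tryLock(mustReleaseLock)) {
//         // ... update the rendering graph state ...
//         if (mustReleaseLock)
//             unlock();
//     }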

bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}

bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}

void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(node);
}

void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails; we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}

void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended, and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}

void AudioContext::handleDeferredFinishDerefs()
{
    ASSERT(isAudioThread() && isGraphOwner());
    for (auto& node : m_deferredFinishDerefList)
        node->finishDeref(AudioNode::RefTypeConnection);

    m_deferredFinishDerefList.clear();
}

void AudioContext::markForDeletion(AudioNode* node)
{
    ASSERT(isGraphOwner());

    if (isAudioThreadFinished())
        m_nodesToDelete.append(node);
    else
        m_nodesMarkedForDeletion.append(node);

    // This is probably the best time for us to remove the node from the automatic pull list,
    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
    // modify m_renderingAutomaticPullNodes.
    removeAutomaticPullNode(node);
}

void AudioContext::scheduleNodeDeletion()
{
    bool isGood = m_isInitialized && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;

    // Make sure to call deleteMarkedNodes() on the main thread.
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();

        m_isDeletionScheduled = true;

        RefPtr<AudioContext> strongThis(this);
        callOnMainThread([strongThis] {
            strongThis->deleteMarkedNodes();
        });
    }
}
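
// Note on the two-stage handoff above: while the audio thread is running, nodes
// land in m_nodesMarkedForDeletion under the graph lock; scheduleNodeDeletion()
// moves each batch into m_nodesToDelete and hops to the main thread, which is
// the only place deleteMarkedNodes() below actually frees them.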

void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());

    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    Ref<AudioContext> protect(*this);
    {
        AutoLocker locker(*this);

        while (m_nodesToDelete.size()) {
            AudioNode* node = m_nodesToDelete.takeLast();

            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));

            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));

            // Finally, delete it.
            delete node;
        }
        m_isDeletionScheduled = false;
    }
}

void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());
    m_dirtySummingJunctions.add(summingJunction);
}

void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(*this);
    m_dirtySummingJunctions.remove(summingJunction);
}

void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}

void AudioContext::handleDirtyAudioSummingJunctions()
{
    ASSERT(isGraphOwner());

    for (auto& junction : m_dirtySummingJunctions)
        junction->updateRenderingState();

    m_dirtySummingJunctions.clear();
}

void AudioContext::handleDirtyAudioNodeOutputs()
{
    ASSERT(isGraphOwner());

    for (auto& output : m_dirtyAudioNodeOutputs)
        output->updateRenderingState();

    m_dirtyAudioNodeOutputs.clear();
}

void AudioContext::addAutomaticPullNode(AudioNode* node)
{
    ASSERT(isGraphOwner());

    if (m_automaticPullNodes.add(node).isNewEntry)
        m_automaticPullNodesNeedUpdating = true;
}

void AudioContext::removeAutomaticPullNode(AudioNode* node)
{
    ASSERT(isGraphOwner());

    if (m_automaticPullNodes.remove(node))
        m_automaticPullNodesNeedUpdating = true;
}

void AudioContext::updateAutomaticPullNodes()
{
    ASSERT(isGraphOwner());

    if (m_automaticPullNodesNeedUpdating) {
        // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());

        unsigned i = 0;
        for (auto& node : m_automaticPullNodes)
            m_renderingAutomaticPullNodes[i++] = node;

        m_automaticPullNodesNeedUpdating = false;
    }
}

void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
{
    ASSERT(isAudioThread());

    for (auto& node : m_renderingAutomaticPullNodes)
        node->processIfNecessary(framesToProcess);
}

ScriptExecutionContext* AudioContext::scriptExecutionContext() const
{
    return m_isStopScheduled ? nullptr : ActiveDOMObject::scriptExecutionContext();
}

void AudioContext::nodeWillBeginPlayback()
{
    // Called by scheduled AudioNodes when clients schedule their start times.
    // Prior to the introduction of suspend(), resume(), and close(), starting
    // a scheduled AudioNode would remove the user-gesture restriction, if present,
    // and would thus unmute the context. Now that AudioContext stays in the
    // "suspended" state if a user-gesture restriction is present, starting a
    // scheduled AudioNode should set the state to "running", but only if the
    // user-gesture restriction is set.
    if (userGestureRequiredForAudioStart())
        startRendering();
}

bool AudioContext::willBeginPlayback()
{
    if (userGestureRequiredForAudioStart()) {
        if (!ScriptController::processingUserGestureForMedia())
            return false;
        removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
    }

    if (pageConsentRequiredForAudioStart()) {
        Page* page = document()->page();
        if (page && !page->canStartMedia()) {
            document()->addMediaCanStartListener(this);
            return false;
        }
        removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
    }

    return m_mediaSession->clientWillBeginPlayback();
}

bool AudioContext::willPausePlayback()
{
    if (userGestureRequiredForAudioStart()) {
        if (!ScriptController::processingUserGestureForMedia())
            return false;
        removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
    }

    if (pageConsentRequiredForAudioStart()) {
        Page* page = document()->page();
        if (page && !page->canStartMedia()) {
            document()->addMediaCanStartListener(this);
            return false;
        }
        removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
    }

    return m_mediaSession->clientWillPausePlayback();
}

void AudioContext::startRendering()
{
    if (!willBeginPlayback())
        return;

    destination()->startRendering();
    setState(State::Running);
}

void AudioContext::mediaCanStart()
{
    removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
}

MediaProducer::MediaStateFlags AudioContext::mediaState() const
{
    if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
        return MediaProducer::IsPlayingAudio;

    return MediaProducer::IsNotPlaying;
}

void AudioContext::pageMutedStateDidChange()
{
    if (m_destinationNode && document()->page())
        m_destinationNode->setMuted(document()->page()->isMuted());
}

void AudioContext::isPlayingAudioDidChange()
{
    // Make sure to call Document::updateIsPlayingMedia() on the main thread, since
    // we could be on the audio I/O thread here, and the call into WebCore could block.
    RefPtr<AudioContext> strongThis(this);
    callOnMainThread([strongThis] {
        if (strongThis->document())
            strongThis->document()->updateIsPlayingMedia();
    });
}

void AudioContext::fireCompletionEvent()
{
    ASSERT(isMainThread());
    if (!isMainThread())
        return;

    AudioBuffer* renderedBuffer = m_renderTarget.get();
    setState(State::Closed);

    ASSERT(renderedBuffer);
    if (!renderedBuffer)
        return;

    // Avoid firing the event if the document has already gone away.
    if (scriptExecutionContext()) {
        // Call the offline rendering completion event listener.
        m_eventQueue->enqueueEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
    }
}
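
// Illustrative JavaScript usage of offline rendering (a sketch; the prefixed
// constructor name matches WebKit builds of this vintage):
//
//     var offline = new webkitOfflineAudioContext(2, 44100 * 10, 44100);
//     offline.oncomplete = function(event) {
//         var rendered = event.renderedBuffer; // the m_renderTarget above
//     };
//     offline.startRendering();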

void AudioContext::incrementActiveSourceCount()
{
    ++m_activeSourceCount;
}

void AudioContext::decrementActiveSourceCount()
{
    --m_activeSourceCount;
}

void AudioContext::suspend(Promise&& promise)
{
    if (isOfflineContext()) {
        promise.reject(INVALID_STATE_ERR);
        return;
    }

    if (m_state == State::Suspended) {
        promise.resolve(nullptr);
        return;
    }

    if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
        promise.reject(0);
        return;
    }

    addReaction(State::Suspended, WTFMove(promise));

    if (!willPausePlayback())
        return;

    lazyInitialize();

    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->suspend([strongThis] {
        strongThis->setState(State::Suspended);
    });
}

void AudioContext::resume(Promise&& promise)
{
    if (isOfflineContext()) {
        promise.reject(INVALID_STATE_ERR);
        return;
    }

    if (m_state == State::Running) {
        promise.resolve(nullptr);
        return;
    }

    if (m_state == State::Closed || !m_destinationNode) {
        promise.reject(0);
        return;
    }

    addReaction(State::Running, WTFMove(promise));

    if (!willBeginPlayback())
        return;

    lazyInitialize();

    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->resume([strongThis] {
        strongThis->setState(State::Running);
    });
}

void AudioContext::close(Promise&& promise)
{
    if (isOfflineContext()) {
        promise.reject(INVALID_STATE_ERR);
        return;
    }

    if (m_state == State::Closed || !m_destinationNode) {
        promise.resolve(nullptr);
        return;
    }

    addReaction(State::Closed, WTFMove(promise));

    lazyInitialize();

    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->close([strongThis] {
        strongThis->setState(State::Closed);
        strongThis->uninitialize();
    });
}

void AudioContext::suspendPlayback()
{
    if (!m_destinationNode || m_state == State::Closed)
        return;

    if (m_state == State::Suspended) {
        if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
            setState(State::Interrupted);
        return;
    }

    lazyInitialize();

    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->suspend([strongThis] {
        bool interrupted = strongThis->m_mediaSession->state() == PlatformMediaSession::Interrupted;
        strongThis->setState(interrupted ? State::Interrupted : State::Suspended);
    });
}

void AudioContext::mayResumePlayback(bool shouldResume)
{
    if (!m_destinationNode || m_state == State::Closed || m_state == State::Running)
        return;

    if (!shouldResume) {
        setState(State::Suspended);
        return;
    }

    if (!willBeginPlayback())
        return;

    lazyInitialize();

    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->resume([strongThis] {
        strongThis->setState(State::Running);
    });
}

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)