Replaced 0 with nullptr in WebCore/Modules.
[WebKit-https.git] / Source / WebCore / Modules / webaudio / AudioContext.cpp
1 /*
2  * Copyright (C) 2010, Google Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1.  Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2.  Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16  * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
17  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
22  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23  */
24
25 #include "config.h"
26
27 #if ENABLE(WEB_AUDIO)
28
29 #include "AudioContext.h"
30
31 #include "AnalyserNode.h"
32 #include "AsyncAudioDecoder.h"
33 #include "AudioBuffer.h"
34 #include "AudioBufferCallback.h"
35 #include "AudioBufferSourceNode.h"
36 #include "AudioListener.h"
37 #include "AudioNodeInput.h"
38 #include "AudioNodeOutput.h"
39 #include "BiquadFilterNode.h"
40 #include "ChannelMergerNode.h"
41 #include "ChannelSplitterNode.h"
42 #include "ConvolverNode.h"
43 #include "DefaultAudioDestinationNode.h"
44 #include "DelayNode.h"
45 #include "Document.h"
46 #include "DynamicsCompressorNode.h"
47 #include "EventNames.h"
48 #include "ExceptionCode.h"
49 #include "FFTFrame.h"
50 #include "GainNode.h"
51 #include "GenericEventQueue.h"
52 #include "HRTFDatabaseLoader.h"
53 #include "HRTFPanner.h"
54 #include "OfflineAudioCompletionEvent.h"
55 #include "OfflineAudioDestinationNode.h"
56 #include "OscillatorNode.h"
57 #include "Page.h"
58 #include "PannerNode.h"
59 #include "PeriodicWave.h"
60 #include "ScriptController.h"
61 #include "ScriptProcessorNode.h"
62 #include "WaveShaperNode.h"
63 #include <inspector/ScriptCallStack.h>
64 #include <wtf/NeverDestroyed.h>
65
66 #if ENABLE(MEDIA_STREAM)
67 #include "MediaStream.h"
68 #include "MediaStreamAudioDestinationNode.h"
69 #include "MediaStreamAudioSource.h"
70 #include "MediaStreamAudioSourceNode.h"
71 #endif
72
73 #if ENABLE(VIDEO)
74 #include "HTMLMediaElement.h"
75 #include "MediaElementAudioSourceNode.h"
76 #endif
77
78 #if DEBUG_AUDIONODE_REFERENCES
79 #include <stdio.h>
80 #endif
81
82 #if USE(GSTREAMER)
83 #include "GStreamerUtilities.h"
84 #endif
85
86 #if PLATFORM(IOS)
87 #include "ScriptController.h"
88 #include "Settings.h"
89 #endif
90
91 #include <runtime/ArrayBuffer.h>
92 #include <wtf/Atomics.h>
93 #include <wtf/MainThread.h>
94 #include <wtf/Ref.h>
95 #include <wtf/RefCounted.h>
96 #include <wtf/text/WTFString.h>
97
// FIXME: check the proper way to reference an undefined thread ID
// Sentinel stored in m_graphOwnerThread when no thread holds the graph lock.
// NOTE(review): 0xffffffff is narrowed into a signed int here; presumably no
// real ThreadIdentifier ever equals this value — confirm.
const int UndefinedThreadIdentifier = 0xffffffff;

// Maximum number of Fourier coefficients accepted by createPeriodicWave().
const unsigned MaxPeriodicWaveLength = 4096;
103 namespace WebCore {
104     
105 bool AudioContext::isSampleRateRangeGood(float sampleRate)
106 {
107     // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
108     // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
109     return sampleRate >= 44100 && sampleRate <= 96000;
110 }
111
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
const unsigned MaxHardwareContexts = 4;
// Live count of hardware-backed contexts; modified on the main thread in
// lazyInitialize()/uninitialize() and checked in create().
unsigned AudioContext::s_hardwareContextCount = 0;
115     
// Creates a realtime AudioContext bound to |document|, or nullptr when the
// process-wide limit of simultaneous hardware-backed contexts is reached.
// |ec| is accepted for API symmetry but never set here.
RefPtr<AudioContext> AudioContext::create(Document& document, ExceptionCode& ec)
{
    UNUSED_PARAM(ec);

    ASSERT(isMainThread());
    if (s_hardwareContextCount >= MaxHardwareContexts)
        return nullptr;

    RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
    // ActiveDOMObject bookkeeping: suspend right away if the document is suspended.
    audioContext->suspendIfNeeded();
    return audioContext;
}
128
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document& document)
    : ActiveDOMObject(&document)
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
    , m_graphOwnerThread(UndefinedThreadIdentifier) // No thread owns the graph lock yet.
{
    constructCommon();

    // Realtime contexts render through the default (hardware) destination node.
    m_destinationNode = DefaultAudioDestinationNode::create(this);

    // Initialize the destination node's muted state to match the page's current muted state.
    pageMutedStateDidChange();
}
143
// Constructor for offline (non-realtime) rendering.
AudioContext::AudioContext(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    : ActiveDOMObject(&document)
    , m_isOfflineContext(true)
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
    , m_graphOwnerThread(UndefinedThreadIdentifier) // No thread owns the graph lock yet.
{
    constructCommon();

    // Create a new destination for offline rendering.
    // Rendered audio is accumulated into m_renderTarget instead of going to hardware.
    m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
}
158
// Shared initialization for both the realtime and offline constructors:
// pins the object alive, prepares FFT machinery, creates the listener, and
// installs platform-specific playback restrictions.
void AudioContext::constructCommon()
{
    // According to spec AudioContext must die only after page navigate.
    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
    setPendingActivity(this);

#if USE(GSTREAMER)
    initializeGStreamer();
#endif

    FFTFrame::initialize();
    
    m_listener = AudioListener::create();

#if PLATFORM(IOS)
    // Without settings (or with the gesture requirement enabled), audio may not
    // start until a user gesture occurs.
    if (!document()->settings() || document()->settings()->requiresUserGestureForMediaPlayback())
        addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
    else
        m_restrictions = NoRestrictions;
#endif

#if PLATFORM(COCOA)
    addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
#endif
}
184
// Destructor: by this point stop() must have run and uninitialize()/clear()
// must have drained all node bookkeeping; the asserts verify that.
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    ASSERT(!m_isInitialized);
    ASSERT(m_isStopScheduled);
    ASSERT(m_nodesToDelete.isEmpty());
    ASSERT(m_referencedNodes.isEmpty());
    ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion fails on tests sometimes.
    ASSERT(m_automaticPullNodes.isEmpty());
    // Flush a pending resize so the following assert checks the real state.
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(m_renderingAutomaticPullNodes.isEmpty());
    // FIXME: Can we assert that m_deferredFinishDerefList is empty?
}
201
202 void AudioContext::lazyInitialize()
203 {
204     if (m_isInitialized)
205         return;
206
207     // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
208     ASSERT(!m_isAudioThreadFinished);
209     if (m_isAudioThreadFinished)
210         return;
211
212     if (m_destinationNode.get()) {
213         m_destinationNode->initialize();
214
215         if (!isOfflineContext()) {
216             document()->addAudioProducer(this);
217
218             // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
219             // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
220             // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
221             // We may want to consider requiring it for symmetry with OfflineAudioContext.
222             startRendering();
223             ++s_hardwareContextCount;
224         }
225     }
226     m_isInitialized = true;
227 }
228
229 void AudioContext::clear()
230 {
231     // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
232     if (m_destinationNode)
233         m_destinationNode.clear();
234
235     // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
236     do {
237         deleteMarkedNodes();
238         m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
239         m_nodesMarkedForDeletion.clear();
240     } while (m_nodesToDelete.size());
241
242     // It was set in constructCommon.
243     unsetPendingActivity(this);
244 }
245
// Stops rendering and unwinds everything lazyInitialize() set up. Runs on the
// main thread (scheduled from stop()).
void AudioContext::uninitialize()
{
    ASSERT(isMainThread());

    if (!m_isInitialized)
        return;

    // This stops the audio thread and all audio rendering.
    m_destinationNode->uninitialize();

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    m_isAudioThreadFinished = true;

    if (!isOfflineContext()) {
        document()->removeAudioProducer(this);

        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;

        // Offline contexts move to 'Closed' state when dispatching the completion event.
        setState(State::Closed);
    }

    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();

    m_isInitialized = false;
}
274
// True between lazyInitialize() and uninitialize().
bool AudioContext::isInitialized() const
{
    return m_isInitialized;
}
279
// Registers a one-shot callback to run when the context next transitions into
// |state|. Reactions are consumed (swapped out and invoked) by setState().
void AudioContext::addReaction(State state, std::function<void()> reaction)
{
    size_t stateIndex = static_cast<size_t>(state);
    // Grow the per-state reaction table on demand.
    if (stateIndex >= m_stateReactions.size())
        m_stateReactions.resize(stateIndex + 1);

    m_stateReactions[stateIndex].append(reaction);
}
288
// Moves the context to |state|, enqueues a "statechange" event, and runs any
// reactions registered via addReaction() for the new state. No-op when the
// state is unchanged.
void AudioContext::setState(State state)
{
    if (m_state == state)
        return;

    m_state = state;
    m_eventQueue->enqueueEvent(Event::create(eventNames().statechangeEvent, true, false));

    size_t stateIndex = static_cast<size_t>(state);
    if (stateIndex >= m_stateReactions.size())
        return;

    // Swap the pending reactions out first so a reaction that registers a new
    // one for this same state doesn't run in this pass.
    Vector<std::function<void()>> reactions;
    m_stateReactions[stateIndex].swap(reactions);

    for (auto& reaction : reactions)
        reaction();
}
307
// Maps the internal State enum to the DOM-visible string value.
const AtomicString& AudioContext::state() const
{
    static NeverDestroyed<AtomicString> suspended("suspended");
    static NeverDestroyed<AtomicString> running("running");
    static NeverDestroyed<AtomicString> interrupted("interrupted");
    static NeverDestroyed<AtomicString> closed("closed");

    switch (m_state) {
    case State::Suspended:
        return suspended;
    case State::Running:
        return running;
    case State::Interrupted:
        return interrupted;
    case State::Closed:
        return closed;
    }

    // All enum values are handled above; this is unreachable.
    ASSERT_NOT_REACHED();
    return suspended;
}
329
// ActiveDOMObject override: called when the script execution context goes
// away. Marks the context stopped and defers the actual teardown to the main
// thread's run loop.
void AudioContext::stop()
{
    ASSERT(isMainThread());

    // Usually ScriptExecutionContext calls stop twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;

    document()->updateIsPlayingMedia();

    m_eventQueue->close();

    // Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle
    // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
    // ActiveDOMObjects so let's schedule uninitialize() to be called later.
    // FIXME: see if there's a more direct way to handle this issue.
    // FIXME: This sounds very wrong. The whole idea of stop() is that it stops everything, and if we
    // schedule some observable work for later, the work likely happens at an inappropriate time.
    callOnMainThread([this] {
        uninitialize();
        clear();
    });
}
354
// A context may enter the page cache only while it is not actively rendering.
bool AudioContext::canSuspendForPageCache() const
{
    // FIXME: We should be able to suspend while rendering as well with some more code.
    return m_state == State::Suspended || m_state == State::Closed;
}
360
// Diagnostic name reported to the ActiveDOMObject machinery.
const char* AudioContext::activeDOMObjectName() const
{
    return "AudioContext";
}
365
// The owning Document. The script execution context of an AudioContext is
// always a Document, hence the unchecked downcast.
Document* AudioContext::document() const
{
    ASSERT(m_scriptExecutionContext);
    return downcast<Document>(m_scriptExecutionContext);
}
371
372 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
373 {
374     RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
375     if (!audioBuffer.get()) {
376         ec = NOT_SUPPORTED_ERR;
377         return nullptr;
378     }
379
380     return audioBuffer;
381 }
382
383 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionCode& ec)
384 {
385     ASSERT(arrayBuffer);
386     if (!arrayBuffer) {
387         ec = SYNTAX_ERR;
388         return nullptr;
389     }
390
391     RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
392     if (!audioBuffer.get()) {
393         ec = SYNTAX_ERR;
394         return nullptr;
395     }
396
397     return audioBuffer;
398 }
399
// Kicks off asynchronous decoding of |audioData|; the decoder invokes
// |successCallback| or |errorCallback| when done. Sets SYNTAX_ERR for a null
// input.
void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionCode& ec)
{
    if (!audioData) {
        ec = SYNTAX_ERR;
        return;
    }
    m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
}
408
// Creates an AudioBufferSourceNode at the destination's sample rate.
PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());

    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    refNode(node.get());

    return node;
}
421
#if ENABLE(VIDEO)
// Creates a source node that taps the audio of |mediaElement|. Each media
// element may feed at most one source node; a second attempt (or a null
// element) fails with INVALID_STATE_ERR.
PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionCode& ec)
{
    ASSERT(mediaElement);
    if (!mediaElement) {
        ec = INVALID_STATE_ERR;
        return nullptr;
    }
        
    ASSERT(isMainThread());
    lazyInitialize();
    
    // First check if this media element already has a source node.
    if (mediaElement->audioSourceNode()) {
        ec = INVALID_STATE_ERR;
        return nullptr;
    }
        
    RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);

    // Link the element back to its source node so the one-per-element rule above holds.
    mediaElement->setAudioSourceNode(node.get());

    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
#endif
448
#if ENABLE(MEDIA_STREAM)
// Creates a source node fed by |mediaStream|. Scans the stream's audio tracks
// for a local audio-stream source and, if found, wires up the destination's
// local audio input as the provider. Null stream => INVALID_STATE_ERR.
PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec)
{
    ASSERT(mediaStream);
    if (!mediaStream) {
        ec = INVALID_STATE_ERR;
        return nullptr;
    }

    ASSERT(isMainThread());
    lazyInitialize();

    AudioSourceProvider* provider = nullptr;

    RefPtr<MediaStreamTrack> audioTrack;

    // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
    // Note: audioTrack keeps the last inspected track even when no provider is
    // found; it is handed to the node below either way.
    for (auto& track : mediaStream->getAudioTracks()) {
        audioTrack = track;
        if (audioTrack->source()->isAudioStreamSource()) {
            auto source = static_cast<MediaStreamAudioSource*>(audioTrack->source());
            ASSERT(!source->deviceId().isEmpty());
            destination()->enableInput(source->deviceId());
            provider = destination()->localAudioInputProvider();
            break;
        }
    }

    RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider);

    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
    node->setFormat(2, sampleRate());

    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}

// Creates a destination node that exposes rendered audio as a MediaStream.
PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
{
    // FIXME: Add support for an optional argument which specifies the number of channels.
    // FIXME: The default should probably be stereo instead of mono.
    return MediaStreamAudioDestinationNode::create(this, 1);
}

#endif
494
495 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionCode& ec)
496 {
497     // Set number of input/output channels to stereo by default.
498     return createScriptProcessor(bufferSize, 2, 2, ec);
499 }
500
501 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode& ec)
502 {
503     // Set number of output channels to stereo by default.
504     return createScriptProcessor(bufferSize, numberOfInputChannels, 2, ec);
505 }
506
507 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec)
508 {
509     ASSERT(isMainThread());
510     lazyInitialize();
511     RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
512
513     if (!node.get()) {
514         ec = INDEX_SIZE_ERR;
515         return nullptr;
516     }
517
518     refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
519     return node;
520 }
521
// The factories below all follow the same pattern: assert main thread, make
// sure the context is initialized, then create the node at the destination's
// sample rate. None of these nodes require the context to hold a reference.

PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return WaveShaperNode::create(this);
}

PassRefPtr<PannerNode> AudioContext::createPanner()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return PannerNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<ConvolverNode> AudioContext::createConvolver()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return ConvolverNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return AnalyserNode::create(this, m_destinationNode->sampleRate());
}

PassRefPtr<GainNode> AudioContext::createGain()
{
    ASSERT(isMainThread());
    lazyInitialize();
    return GainNode::create(this, m_destinationNode->sampleRate());
}
570
// Convenience overload: one-second maximum delay.
PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionCode& ec)
{
    const double defaultMaxDelayTime = 1;
    return createDelay(defaultMaxDelayTime, ec);
}

// Creates a DelayNode. DelayNode::create() sets |ec| when |maxDelayTime| is
// out of range, in which case null is returned.
PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionCode& ec)
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, ec);
    if (ec)
        return nullptr;
    return node;
}
586
587 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionCode& ec)
588 {
589     const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
590     return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, ec);
591 }
592
593 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionCode& ec)
594 {
595     ASSERT(isMainThread());
596     lazyInitialize();
597
598     RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
599
600     if (!node.get()) {
601         ec = SYNTAX_ERR;
602         return nullptr;
603     }
604
605     return node;
606 }
607
608 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionCode& ec)
609 {
610     const unsigned ChannelMergerDefaultNumberOfInputs = 6;
611     return createChannelMerger(ChannelMergerDefaultNumberOfInputs, ec);
612 }
613
614 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionCode& ec)
615 {
616     ASSERT(isMainThread());
617     lazyInitialize();
618
619     RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
620
621     if (!node.get()) {
622         ec = SYNTAX_ERR;
623         return nullptr;
624     }
625
626     return node;
627 }
628
// Creates an OscillatorNode at the destination's sample rate.
PassRefPtr<OscillatorNode> AudioContext::createOscillator()
{
    ASSERT(isMainThread());
    lazyInitialize();

    RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());

    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    refNode(node.get());

    return node;
}
642
643 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionCode& ec)
644 {
645     ASSERT(isMainThread());
646     
647     if (!real || !imag || (real->length() != imag->length() || (real->length() > MaxPeriodicWaveLength) || (real->length() <= 0))) {
648         ec = SYNTAX_ERR;
649         return nullptr;
650     }
651     
652     lazyInitialize();
653     return PeriodicWave::create(sampleRate(), real, imag);
654 }
655
// Called on the audio thread when a scheduled source node finishes playing;
// the node is deref'd later on by derefFinishedSourceNodes().
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}

// Drops the context's connection references to all nodes that reported
// themselves finished. Requires the graph lock.
void AudioContext::derefFinishedSourceNodes()
{
    ASSERT(isGraphOwner());
    ASSERT(isAudioThread() || isAudioThreadFinished());
    for (auto& node : m_finishedNodes)
        derefNode(node);

    m_finishedNodes.clear();
}

// Takes a connection reference to |node| so the context keeps it alive while
// it is playing or connected. Main thread only; takes the graph lock.
void AudioContext::refNode(AudioNode* node)
{
    ASSERT(isMainThread());
    AutoLocker locker(*this);
    
    node->ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(node);
}

// Releases one connection reference taken by refNode(). Caller must hold the
// graph lock.
void AudioContext::derefNode(AudioNode* node)
{
    ASSERT(isGraphOwner());
    
    node->deref(AudioNode::RefTypeConnection);

    ASSERT(m_referencedNodes.contains(node));
    m_referencedNodes.removeFirst(node);
}

// Shutdown path (see uninitialize()): drops every remaining connection
// reference after the audio thread has stopped.
void AudioContext::derefUnfinishedSourceNodes()
{
    ASSERT(isMainThread() && isAudioThreadFinished());
    for (auto& node : m_referencedNodes)
        node->deref(AudioNode::RefTypeConnection);

    m_referencedNodes.clear();
}
699
// Acquires the graph lock on the main thread. |mustReleaseLock| is set to
// false when this thread already owns the lock (re-entrant acquisition), so
// the caller knows whether unlock() is theirs to call.
void AudioContext::lock(bool& mustReleaseLock)
{
    // Don't allow regular lock in real-time audio thread.
    ASSERT(isMainThread());

    ThreadIdentifier thisThread = currentThread();

    if (thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = thisThread;
        mustReleaseLock = true;
    }
}

// Non-blocking lock intended for the real-time audio thread; returns true when
// the lock is held on return. Off the audio thread it degrades to a blocking
// lock() (debug builds assert first).
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();

    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread || isAudioThreadFinished());
    
    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }
    
    bool hasLock;
    
    if (thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();
        
        if (hasLock)
            m_graphOwnerThread = thisThread;

        mustReleaseLock = hasLock;
    }
    
    return hasLock;
}

// Releases the graph lock and clears the recorded owner. Must be called by the
// thread recorded in m_graphOwnerThread.
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);

    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}
758
// True when the calling thread is the rendering (audio) thread.
bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}

// True when the calling thread currently holds the graph lock.
bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}

// Queues a node whose finishDeref() must run later under the graph lock; see
// handleDeferredFinishDerefs(). Audio thread only.
void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(node);
}
774
// Runs at the start of each render quantum on the audio thread: best-effort
// synchronization of graph state changes made by the main thread.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}

// Runs at the end of each render quantum on the audio thread: cleanup of
// finished nodes and deferred graph maintenance, again best-effort.
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
823
// Completes the finishDeref() calls that were queued by addDeferredFinishDeref()
// while the graph lock was unavailable. Audio thread, holding the graph lock.
void AudioContext::handleDeferredFinishDerefs()
{
    ASSERT(isAudioThread() && isGraphOwner());
    for (auto& node : m_deferredFinishDerefList)
        node->finishDeref(AudioNode::RefTypeConnection);
    
    m_deferredFinishDerefList.clear();
}

// Queues |node| for deletion: directly onto the to-delete list once the audio
// thread is finished, otherwise onto the marked list drained by
// scheduleNodeDeletion(). Caller holds the graph lock.
void AudioContext::markForDeletion(AudioNode* node)
{
    ASSERT(isGraphOwner());

    if (isAudioThreadFinished())
        m_nodesToDelete.append(node);
    else
        m_nodesMarkedForDeletion.append(node);

    // This is probably the best time for us to remove the node from automatic pull list,
    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
    // modify m_renderingAutomaticPullNodes.
    removeAutomaticPullNode(node);
}
848
// Moves marked nodes onto the to-delete list and schedules deleteMarkedNodes()
// on the main thread (at most one pending schedule at a time). Called from the
// audio thread while holding the graph lock.
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = m_isInitialized && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;

    // Make sure to call deleteMarkedNodes() on main thread.    
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();

        m_isDeletionScheduled = true;

        // Keep the context alive until the main-thread deletion pass runs.
        RefPtr<AudioContext> strongThis(this);
        callOnMainThread([strongThis] {
            strongThis->deleteMarkedNodes();
        });
    }
}
869
// Main-thread deletion pass for nodes queued by scheduleNodeDeletion()/clear().
// AudioNode lifetime is managed manually by the context, hence the raw delete.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());

    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    Ref<AudioContext> protect(*this);
    {
        AutoLocker locker(*this);

        while (m_nodesToDelete.size()) {
            AudioNode* node = m_nodesToDelete.takeLast();

            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));

            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));

            // Finally, delete it.
            delete node;
        }
        m_isDeletionScheduled = false;
    }
}
898
// Records a summing junction whose rendering state must be refreshed on the
// audio thread (see handleDirtyAudioSummingJunctions()). Graph lock required.
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());    
    m_dirtySummingJunctions.add(summingJunction);
}

// Removes a junction from the dirty set (e.g. when it is being destroyed).
// Main thread; takes the graph lock itself.
void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(*this);
    m_dirtySummingJunctions.remove(summingJunction);
}

// Records an output whose rendering state must be refreshed on the audio
// thread (see handleDirtyAudioNodeOutputs()). Graph lock required.
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());    
    m_dirtyAudioNodeOutputs.add(output);
}

// Refreshes and clears all dirty summing junctions. Graph lock required.
void AudioContext::handleDirtyAudioSummingJunctions()
{
    ASSERT(isGraphOwner());    

    for (auto& junction : m_dirtySummingJunctions)
        junction->updateRenderingState();

    m_dirtySummingJunctions.clear();
}

// Refreshes and clears all dirty node outputs. Graph lock required.
void AudioContext::handleDirtyAudioNodeOutputs()
{
    ASSERT(isGraphOwner());    

    for (auto& output : m_dirtyAudioNodeOutputs)
        output->updateRenderingState();

    m_dirtyAudioNodeOutputs.clear();
}
937
938 void AudioContext::addAutomaticPullNode(AudioNode* node)
939 {
940     ASSERT(isGraphOwner());
941
942     if (m_automaticPullNodes.add(node).isNewEntry)
943         m_automaticPullNodesNeedUpdating = true;
944 }
945
946 void AudioContext::removeAutomaticPullNode(AudioNode* node)
947 {
948     ASSERT(isGraphOwner());
949
950     if (m_automaticPullNodes.remove(node))
951         m_automaticPullNodesNeedUpdating = true;
952 }
953
954 void AudioContext::updateAutomaticPullNodes()
955 {
956     ASSERT(isGraphOwner());
957
958     if (m_automaticPullNodesNeedUpdating) {
959         // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
960         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
961
962         unsigned i = 0;
963         for (auto& output : m_automaticPullNodes)
964             m_renderingAutomaticPullNodes[i++] = output;
965
966         m_automaticPullNodesNeedUpdating = false;
967     }
968 }
969
970 void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
971 {
972     ASSERT(isAudioThread());
973
974     for (auto& node : m_renderingAutomaticPullNodes)
975         node->processIfNecessary(framesToProcess);
976 }
977
978 ScriptExecutionContext* AudioContext::scriptExecutionContext() const
979 {
980     return m_isStopScheduled ? 0 : ActiveDOMObject::scriptExecutionContext();
981 }
982
983 void AudioContext::nodeWillBeginPlayback()
984 {
985     // Called by scheduled AudioNodes when clients schedule their start times.
986     // Prior to the introduction of suspend(), resume(), and stop(), starting
987     // a scheduled AudioNode would remove the user-gesture restriction, if present,
988     // and would thus unmute the context. Now that AudioContext stays in the
989     // "suspended" state if a user-gesture restriction is present, starting a
990     // schedule AudioNode should set the state to "running", but only if the
991     // user-gesture restriction is set.
992     if (userGestureRequiredForAudioStart())
993         startRendering();
994 }
995
996 bool AudioContext::willBeginPlayback()
997 {
998     if (userGestureRequiredForAudioStart()) {
999         if (!ScriptController::processingUserGesture())
1000             return false;
1001         removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
1002     }
1003
1004     if (pageConsentRequiredForAudioStart()) {
1005         Page* page = document()->page();
1006         if (page && !page->canStartMedia()) {
1007             document()->addMediaCanStartListener(this);
1008             return false;
1009         }
1010         removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
1011     }
1012
1013     return m_mediaSession->clientWillBeginPlayback();
1014 }
1015
1016 bool AudioContext::willPausePlayback()
1017 {
1018     if (userGestureRequiredForAudioStart()) {
1019         if (!ScriptController::processingUserGesture())
1020             return false;
1021         removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
1022     }
1023
1024     if (pageConsentRequiredForAudioStart()) {
1025         Page* page = document()->page();
1026         if (page && !page->canStartMedia()) {
1027             document()->addMediaCanStartListener(this);
1028             return false;
1029         }
1030         removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
1031     }
1032     
1033     return m_mediaSession->clientWillPausePlayback();
1034 }
1035
1036 void AudioContext::startRendering()
1037 {
1038     if (!willBeginPlayback())
1039         return;
1040
1041     destination()->startRendering();
1042     setState(State::Running);
1043 }
1044
void AudioContext::mediaCanStart()
{
    // MediaCanStartListener callback: the page now allows media to start, so
    // lift the page-consent restriction that willBeginPlayback() /
    // willPausePlayback() registered this listener for.
    removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
}
1049
1050 MediaProducer::MediaStateFlags AudioContext::mediaState() const
1051 {
1052     if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
1053         return MediaProducer::IsPlayingAudio;
1054
1055     return MediaProducer::IsNotPlaying;
1056 }
1057
1058 void AudioContext::pageMutedStateDidChange()
1059 {
1060     if (m_destinationNode && document()->page())
1061         m_destinationNode->setMuted(document()->page()->isMuted());
1062 }
1063
void AudioContext::isPlayingAudioDidChange()
{
    // Tell the document to recompute its "is playing media" state (see
    // mediaState(), which it will consult).
    // NOTE(review): document() is used unchecked here; confirm this cannot be
    // invoked after the document has gone away.
    document()->updateIsPlayingMedia();
}
1068
1069 void AudioContext::fireCompletionEvent()
1070 {
1071     ASSERT(isMainThread());
1072     if (!isMainThread())
1073         return;
1074         
1075     AudioBuffer* renderedBuffer = m_renderTarget.get();
1076     setState(State::Closed);
1077
1078     ASSERT(renderedBuffer);
1079     if (!renderedBuffer)
1080         return;
1081
1082     // Avoid firing the event if the document has already gone away.
1083     if (scriptExecutionContext()) {
1084         // Call the offline rendering completion event listener.
1085         m_eventQueue->enqueueEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
1086     }
1087 }
1088
void AudioContext::incrementActiveSourceCount()
{
    // Bump the running tally of active source nodes.
    // NOTE(review): m_activeSourceCount's type is not visible here — if both
    // threads can call this, it needs to be atomic; confirm in the header.
    ++m_activeSourceCount;
}
1093
void AudioContext::decrementActiveSourceCount()
{
    // Counterpart to incrementActiveSourceCount(); called when a source node
    // finishes. Same thread-safety caveat applies to m_activeSourceCount.
    --m_activeSourceCount;
}
1098
1099 void AudioContext::suspendContext(std::function<void()> successCallback, FailureCallback failureCallback)
1100 {
1101     ASSERT(successCallback);
1102     ASSERT(failureCallback);
1103
1104     if (isOfflineContext()) {
1105         failureCallback(INVALID_STATE_ERR);
1106         return;
1107     }
1108
1109     if (m_state == State::Suspended) {
1110         successCallback();
1111         return;
1112     }
1113
1114     if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
1115         failureCallback(0);
1116         return;
1117     }
1118
1119     addReaction(State::Suspended, successCallback);
1120
1121     if (!willPausePlayback())
1122         return;
1123
1124     lazyInitialize();
1125
1126     RefPtr<AudioContext> strongThis(this);
1127     m_destinationNode->suspend([strongThis] {
1128         strongThis->setState(State::Suspended);
1129     });
1130 }
1131
1132 void AudioContext::resumeContext(std::function<void()> successCallback, FailureCallback failureCallback)
1133 {
1134     ASSERT(successCallback);
1135     ASSERT(failureCallback);
1136
1137     if (isOfflineContext()) {
1138         failureCallback(INVALID_STATE_ERR);
1139         return;
1140     }
1141
1142     if (m_state == State::Running) {
1143         successCallback();
1144         return;
1145     }
1146
1147     if (m_state == State::Closed || !m_destinationNode) {
1148         failureCallback(0);
1149         return;
1150     }
1151
1152     addReaction(State::Running, successCallback);
1153
1154     if (!willBeginPlayback())
1155         return;
1156
1157     lazyInitialize();
1158
1159     RefPtr<AudioContext> strongThis(this);
1160     m_destinationNode->resume([strongThis] {
1161         strongThis->setState(State::Running);
1162     });
1163 }
1164
1165 void AudioContext::closeContext(std::function<void()> successCallback, FailureCallback failureCallback)
1166 {
1167     ASSERT(successCallback);
1168     ASSERT(failureCallback);
1169
1170     if (isOfflineContext()) {
1171         failureCallback(INVALID_STATE_ERR);
1172         return;
1173     }
1174
1175     if (m_state == State::Closed || !m_destinationNode) {
1176         successCallback();
1177         return;
1178     }
1179
1180     addReaction(State::Closed, successCallback);
1181
1182     lazyInitialize();
1183
1184     RefPtr<AudioContext> strongThis(this);
1185     m_destinationNode->close([strongThis, successCallback] {
1186         strongThis->setState(State::Closed);
1187         strongThis->uninitialize();
1188     });
1189 }
1190
1191
1192 void AudioContext::suspendPlayback()
1193 {
1194     if (!m_destinationNode || m_state == State::Closed)
1195         return;
1196
1197     if (m_state == State::Suspended) {
1198         if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
1199             setState(State::Interrupted);
1200         return;
1201     }
1202
1203     lazyInitialize();
1204
1205     RefPtr<AudioContext> strongThis(this);
1206     m_destinationNode->suspend([strongThis] {
1207         bool interrupted = strongThis->m_mediaSession->state() == PlatformMediaSession::Interrupted;
1208         strongThis->setState(interrupted ? State::Interrupted : State::Suspended);
1209     });
1210 }
1211
1212 void AudioContext::mayResumePlayback(bool shouldResume)
1213 {
1214     if (!m_destinationNode || m_state == State::Closed || m_state == State::Running)
1215         return;
1216
1217     if (!shouldResume) {
1218         setState(State::Suspended);
1219         return;
1220     }
1221
1222     if (!willBeginPlayback())
1223         return;
1224
1225     lazyInitialize();
1226
1227     RefPtr<AudioContext> strongThis(this);
1228     m_destinationNode->resume([strongThis] {
1229         strongThis->setState(State::Running);
1230     });
1231 }
1232
1233
1234 } // namespace WebCore
1235
1236 #endif // ENABLE(WEB_AUDIO)