a29316a15ff2eb79589206a124510dfa07e5810a
[WebKit-https.git] / Source / WebCore / Modules / webaudio / AudioContext.cpp
1 /*
2  * Copyright (C) 2010, Google Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1.  Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2.  Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16  * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
17  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
22  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23  */
24
25 #include "config.h"
26
27 #if ENABLE(WEB_AUDIO)
28
29 #include "AudioContext.h"
30
31 #include "AnalyserNode.h"
32 #include "AsyncAudioDecoder.h"
33 #include "AudioBuffer.h"
34 #include "AudioBufferCallback.h"
35 #include "AudioBufferSourceNode.h"
36 #include "AudioListener.h"
37 #include "AudioNodeInput.h"
38 #include "AudioNodeOutput.h"
39 #include "BiquadFilterNode.h"
40 #include "ChannelMergerNode.h"
41 #include "ChannelSplitterNode.h"
42 #include "ConvolverNode.h"
43 #include "DefaultAudioDestinationNode.h"
44 #include "DelayNode.h"
45 #include "Document.h"
46 #include "DynamicsCompressorNode.h"
47 #include "EventNames.h"
48 #include "ExceptionCode.h"
49 #include "FFTFrame.h"
50 #include "GainNode.h"
51 #include "GenericEventQueue.h"
52 #include "HRTFDatabaseLoader.h"
53 #include "HRTFPanner.h"
54 #include "OfflineAudioCompletionEvent.h"
55 #include "OfflineAudioDestinationNode.h"
56 #include "OscillatorNode.h"
57 #include "Page.h"
58 #include "PannerNode.h"
59 #include "PeriodicWave.h"
60 #include "ScriptController.h"
61 #include "ScriptProcessorNode.h"
62 #include "WaveShaperNode.h"
63 #include <inspector/ScriptCallStack.h>
64 #include <wtf/NeverDestroyed.h>
65
66 #if ENABLE(MEDIA_STREAM)
67 #include "MediaStream.h"
68 #include "MediaStreamAudioDestinationNode.h"
69 #include "MediaStreamAudioSource.h"
70 #include "MediaStreamAudioSourceNode.h"
71 #endif
72
73 #if ENABLE(VIDEO)
74 #include "HTMLMediaElement.h"
75 #include "MediaElementAudioSourceNode.h"
76 #endif
77
78 #if DEBUG_AUDIONODE_REFERENCES
79 #include <stdio.h>
80 #endif
81
82 #if USE(GSTREAMER)
83 #include "GStreamerUtilities.h"
84 #endif
85
86 #if PLATFORM(IOS)
87 #include "ScriptController.h"
88 #include "Settings.h"
89 #endif
90
91 #include <runtime/ArrayBuffer.h>
92 #include <wtf/Atomics.h>
93 #include <wtf/MainThread.h>
94 #include <wtf/Ref.h>
95 #include <wtf/RefCounted.h>
96 #include <wtf/text/WTFString.h>
97
// FIXME: check the proper way to reference an undefined thread ID
// Sentinel stored in m_graphOwnerThread when no thread currently owns the graph lock.
const int UndefinedThreadIdentifier = 0xffffffff;

// Upper bound on the real/imaginary array lengths accepted by createPeriodicWave().
const unsigned MaxPeriodicWaveLength = 4096;

103 namespace WebCore {
104     
105 bool AudioContext::isSampleRateRangeGood(float sampleRate)
106 {
107     // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
108     // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
109     return sampleRate >= 44100 && sampleRate <= 96000;
110 }
111
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
const unsigned MaxHardwareContexts = 4;
// Count of live hardware (non-offline) contexts; incremented in lazyInitialize(), decremented in uninitialize().
unsigned AudioContext::s_hardwareContextCount = 0;
115     
116 RefPtr<AudioContext> AudioContext::create(Document& document, ExceptionCode& ec)
117 {
118     UNUSED_PARAM(ec);
119
120     ASSERT(isMainThread());
121     if (s_hardwareContextCount >= MaxHardwareContexts)
122         return nullptr;
123
124     RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
125     audioContext->suspendIfNeeded();
126     return audioContext;
127 }
128
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document& document)
    : ActiveDOMObject(&document)
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
    , m_graphOwnerThread(UndefinedThreadIdentifier) // No thread owns the graph lock yet.
{
    constructCommon();

    m_destinationNode = DefaultAudioDestinationNode::create(this);

    // Initialize the destination node's muted state to match the page's current muted state.
    pageMutedStateDidChange();
}

// Constructor for offline (non-realtime) rendering.
AudioContext::AudioContext(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    : ActiveDOMObject(&document)
    , m_isOfflineContext(true)
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
    , m_graphOwnerThread(UndefinedThreadIdentifier) // No thread owns the graph lock yet.
{
    constructCommon();

    // Create a new destination for offline rendering.
    // NOTE(review): AudioBuffer::create() can return null for unsupported geometry; m_renderTarget may be null here.
    m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
}
158
// Initialization shared by both the realtime and offline constructors.
void AudioContext::constructCommon()
{
    // According to spec AudioContext must die only after page navigate.
    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
    setPendingActivity(this);

#if USE(GSTREAMER)
    initializeGStreamer();
#endif

    FFTFrame::initialize();

    m_listener = AudioListener::create();

#if PLATFORM(IOS)
    // With no Settings object we err on the side of requiring a user gesture before audio can start.
    if (!document()->settings() || document()->settings()->requiresUserGestureForMediaPlayback())
        addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
    else
        m_restrictions = NoRestrictions;
#endif

#if PLATFORM(COCOA)
    addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
#endif
}
184
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    // stop() must have scheduled uninitialize()/clear() before destruction; all node lists should be drained.
    ASSERT(!m_isInitialized);
    ASSERT(m_isStopScheduled);
    ASSERT(m_nodesToDelete.isEmpty());
    ASSERT(m_referencedNodes.isEmpty());
    ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion fails on tests sometimes.
    ASSERT(m_automaticPullNodes.isEmpty());
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(m_renderingAutomaticPullNodes.isEmpty());
    // FIXME: Can we assert that m_deferredFinishDerefList is empty?
}
201
// Performs one-time initialization of the destination node and (for realtime
// contexts) starts the audio rendering thread. Called from every node factory.
void AudioContext::lazyInitialize()
{
    if (m_isInitialized)
        return;

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    ASSERT(!m_isAudioThreadFinished);
    if (m_isAudioThreadFinished)
        return;

    if (m_destinationNode.get()) {
        m_destinationNode->initialize();

        if (!isOfflineContext()) {
            // Register with the document so it can account for this context as an audio producer.
            document()->addAudioProducer(this);

            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
            // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
            // We may want to consider requiring it for symmetry with OfflineAudioContext.
            startRendering();
            ++s_hardwareContextCount;
        }
    }
    // NOTE(review): m_isInitialized becomes true even when m_destinationNode is null.
    m_isInitialized = true;
}
228
229 void AudioContext::clear()
230 {
231     // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
232     if (m_destinationNode)
233         m_destinationNode.clear();
234
235     // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
236     do {
237         deleteMarkedNodes();
238         m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
239         m_nodesMarkedForDeletion.clear();
240     } while (m_nodesToDelete.size());
241
242     // It was set in constructCommon.
243     unsetPendingActivity(this);
244 }
245
246 void AudioContext::uninitialize()
247 {
248     ASSERT(isMainThread());
249
250     if (!m_isInitialized)
251         return;
252
253     // This stops the audio thread and all audio rendering.
254     m_destinationNode->uninitialize();
255
256     // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
257     m_isAudioThreadFinished = true;
258
259     if (!isOfflineContext()) {
260         document()->removeAudioProducer(this);
261
262         ASSERT(s_hardwareContextCount);
263         --s_hardwareContextCount;
264
265         // Offline contexts move to 'Closed' state when dispatching the completion event.
266         setState(State::Closed);
267     }
268
269     // Get rid of the sources which may still be playing.
270     derefUnfinishedSourceNodes();
271
272     m_isInitialized = false;
273 }
274
275 bool AudioContext::isInitialized() const
276 {
277     return m_isInitialized;
278 }
279
280 void AudioContext::addReaction(State state, std::function<void()> reaction)
281 {
282     size_t stateIndex = static_cast<size_t>(state);
283     if (stateIndex >= m_stateReactions.size())
284         m_stateReactions.resize(stateIndex + 1);
285
286     m_stateReactions[stateIndex].append(reaction);
287 }
288
// Transitions the context to |state|, fires a "statechange" event, and runs
// (then discards) any reactions queued for that state via addReaction().
void AudioContext::setState(State state)
{
    if (m_state == state)
        return;

    m_state = state;
    m_eventQueue->enqueueEvent(Event::create(eventNames().statechangeEvent, true, false));

    size_t stateIndex = static_cast<size_t>(state);
    if (stateIndex >= m_stateReactions.size())
        return;

    // Swap the reactions out first so re-entrant addReaction() calls don't mutate the list mid-iteration.
    Vector<std::function<void()>> reactions;
    m_stateReactions[stateIndex].swap(reactions);

    for (auto& reaction : reactions)
        reaction();
}

// Maps the internal State enum to the DOM-visible state string.
const AtomicString& AudioContext::state() const
{
    static NeverDestroyed<AtomicString> suspended("suspended");
    static NeverDestroyed<AtomicString> running("running");
    static NeverDestroyed<AtomicString> interrupted("interrupted");
    static NeverDestroyed<AtomicString> closed("closed");

    switch (m_state) {
    case State::Suspended:
        return suspended;
    case State::Running:
        return running;
    case State::Interrupted:
        return interrupted;
    case State::Closed:
        return closed;
    }

    ASSERT_NOT_REACHED();
    return suspended;
}
329
// ActiveDOMObject override: called when the script execution context shuts down.
void AudioContext::stop()
{
    ASSERT(isMainThread());

    // Usually ScriptExecutionContext calls stop twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;

    document()->updateIsPlayingMedia();

    m_eventQueue->close();

    // Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle
    // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
    // ActiveDOMObjects so let's schedule uninitialize() to be called later.
    // FIXME: see if there's a more direct way to handle this issue.
    // FIXME: This sounds very wrong. The whole idea of stop() is that it stops everything, and if we
    // schedule some observable work for later, the work likely happens at an inappropriate time.
    callOnMainThread([this] {
        uninitialize();
        clear();
    });
}

// Page-cache suspension is only allowed when no rendering is in progress.
bool AudioContext::canSuspendForPageCache() const
{
    // FIXME: We should be able to suspend while rendering as well with some more code.
    return m_state == State::Suspended || m_state == State::Closed;
}

// Debug/inspection name for the ActiveDOMObject machinery.
const char* AudioContext::activeDOMObjectName() const
{
    return "AudioContext";
}

// The owning Document; the script execution context of an AudioContext is always a Document.
Document* AudioContext::document() const
{
    ASSERT(m_scriptExecutionContext);
    return downcast<Document>(m_scriptExecutionContext);
}
371
372 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
373 {
374     RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
375     if (!audioBuffer.get()) {
376         ec = NOT_SUPPORTED_ERR;
377         return nullptr;
378     }
379
380     return audioBuffer;
381 }
382
383 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionCode& ec)
384 {
385     ASSERT(arrayBuffer);
386     if (!arrayBuffer) {
387         ec = SYNTAX_ERR;
388         return nullptr;
389     }
390
391     RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
392     if (!audioBuffer.get()) {
393         ec = SYNTAX_ERR;
394         return nullptr;
395     }
396
397     return audioBuffer;
398 }
399
400 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionCode& ec)
401 {
402     if (!audioData) {
403         ec = SYNTAX_ERR;
404         return;
405     }
406     m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
407 }
408
409 PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
410 {
411     ASSERT(isMainThread());
412     lazyInitialize();
413     RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
414
415     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
416     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
417     refNode(node.get());
418
419     return node;
420 }
421
422 #if ENABLE(VIDEO)
// Creates a source node that taps the audio output of an HTMLMediaElement.
// Sets INVALID_STATE_ERR when the element is null or already has a source node.
PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionCode& ec)
{
    ASSERT(mediaElement);
    if (!mediaElement) {
        ec = INVALID_STATE_ERR;
        return nullptr;
    }

    ASSERT(isMainThread());
    lazyInitialize();

    // First check if this media element already has a source node.
    if (mediaElement->audioSourceNode()) {
        ec = INVALID_STATE_ERR;
        return nullptr;
    }

    RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);

    // Link the element back to its node so repeat calls above are rejected.
    mediaElement->setAudioSourceNode(node.get());

    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
447 #endif
448
449 #if ENABLE(MEDIA_STREAM)
450 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec)
451 {
452     ASSERT(mediaStream);
453     if (!mediaStream) {
454         ec = INVALID_STATE_ERR;
455         return nullptr;
456     }
457
458     ASSERT(isMainThread());
459     lazyInitialize();
460
461     AudioSourceProvider* provider = 0;
462
463     Vector<RefPtr<MediaStreamTrack>> audioTracks = mediaStream->getAudioTracks();
464     RefPtr<MediaStreamTrack> audioTrack;
465
466     // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
467     for (size_t i = 0; i < audioTracks.size(); ++i) {
468         audioTrack = audioTracks[i];
469         if (audioTrack->source()->isAudioStreamSource()) {
470             auto source = static_cast<MediaStreamAudioSource*>(audioTrack->source());
471             ASSERT(!source->deviceId().isEmpty());
472             destination()->enableInput(source->deviceId());
473             provider = destination()->localAudioInputProvider();
474             break;
475         }
476     }
477
478     RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider);
479
480     // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
481     node->setFormat(2, sampleRate());
482
483     refNode(node.get()); // context keeps reference until node is disconnected
484     return node;
485 }
486
487 PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
488 {
489     // FIXME: Add support for an optional argument which specifies the number of channels.
490     // FIXME: The default should probably be stereo instead of mono.
491     return MediaStreamAudioDestinationNode::create(this, 1);
492 }
493
494 #endif
495
496 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionCode& ec)
497 {
498     // Set number of input/output channels to stereo by default.
499     return createScriptProcessor(bufferSize, 2, 2, ec);
500 }
501
502 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode& ec)
503 {
504     // Set number of output channels to stereo by default.
505     return createScriptProcessor(bufferSize, numberOfInputChannels, 2, ec);
506 }
507
508 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec)
509 {
510     ASSERT(isMainThread());
511     lazyInitialize();
512     RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
513
514     if (!node.get()) {
515         ec = INDEX_SIZE_ERR;
516         return nullptr;
517     }
518
519     refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
520     return node;
521 }
522
523 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
524 {
525     ASSERT(isMainThread());
526     lazyInitialize();
527     return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
528 }
529
530 PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
531 {
532     ASSERT(isMainThread());
533     lazyInitialize();
534     return WaveShaperNode::create(this);
535 }
536
537 PassRefPtr<PannerNode> AudioContext::createPanner()
538 {
539     ASSERT(isMainThread());
540     lazyInitialize();
541     return PannerNode::create(this, m_destinationNode->sampleRate());
542 }
543
544 PassRefPtr<ConvolverNode> AudioContext::createConvolver()
545 {
546     ASSERT(isMainThread());
547     lazyInitialize();
548     return ConvolverNode::create(this, m_destinationNode->sampleRate());
549 }
550
551 PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
552 {
553     ASSERT(isMainThread());
554     lazyInitialize();
555     return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
556 }
557
558 PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
559 {
560     ASSERT(isMainThread());
561     lazyInitialize();
562     return AnalyserNode::create(this, m_destinationNode->sampleRate());
563 }
564
565 PassRefPtr<GainNode> AudioContext::createGain()
566 {
567     ASSERT(isMainThread());
568     lazyInitialize();
569     return GainNode::create(this, m_destinationNode->sampleRate());
570 }
571
572 PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionCode& ec)
573 {
574     const double defaultMaxDelayTime = 1;
575     return createDelay(defaultMaxDelayTime, ec);
576 }
577
578 PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionCode& ec)
579 {
580     ASSERT(isMainThread());
581     lazyInitialize();
582     RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, ec);
583     if (ec)
584         return nullptr;
585     return node;
586 }
587
588 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionCode& ec)
589 {
590     const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
591     return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, ec);
592 }
593
594 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionCode& ec)
595 {
596     ASSERT(isMainThread());
597     lazyInitialize();
598
599     RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
600
601     if (!node.get()) {
602         ec = SYNTAX_ERR;
603         return nullptr;
604     }
605
606     return node;
607 }
608
609 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionCode& ec)
610 {
611     const unsigned ChannelMergerDefaultNumberOfInputs = 6;
612     return createChannelMerger(ChannelMergerDefaultNumberOfInputs, ec);
613 }
614
615 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionCode& ec)
616 {
617     ASSERT(isMainThread());
618     lazyInitialize();
619
620     RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
621
622     if (!node.get()) {
623         ec = SYNTAX_ERR;
624         return nullptr;
625     }
626
627     return node;
628 }
629
630 PassRefPtr<OscillatorNode> AudioContext::createOscillator()
631 {
632     ASSERT(isMainThread());
633     lazyInitialize();
634
635     RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
636
637     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
638     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
639     refNode(node.get());
640
641     return node;
642 }
643
644 PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionCode& ec)
645 {
646     ASSERT(isMainThread());
647     
648     if (!real || !imag || (real->length() != imag->length() || (real->length() > MaxPeriodicWaveLength) || (real->length() <= 0))) {
649         ec = SYNTAX_ERR;
650         return nullptr;
651     }
652     
653     lazyInitialize();
654     return PeriodicWave::create(sampleRate(), real, imag);
655 }
656
657 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
658 {
659     ASSERT(isAudioThread());
660     m_finishedNodes.append(node);
661 }
662
663 void AudioContext::derefFinishedSourceNodes()
664 {
665     ASSERT(isGraphOwner());
666     ASSERT(isAudioThread() || isAudioThreadFinished());
667     for (unsigned i = 0; i < m_finishedNodes.size(); i++)
668         derefNode(m_finishedNodes[i]);
669
670     m_finishedNodes.clear();
671 }
672
673 void AudioContext::refNode(AudioNode* node)
674 {
675     ASSERT(isMainThread());
676     AutoLocker locker(*this);
677     
678     node->ref(AudioNode::RefTypeConnection);
679     m_referencedNodes.append(node);
680 }
681
682 void AudioContext::derefNode(AudioNode* node)
683 {
684     ASSERT(isGraphOwner());
685     
686     node->deref(AudioNode::RefTypeConnection);
687
688     ASSERT(m_referencedNodes.contains(node));
689     m_referencedNodes.removeFirst(node);
690 }
691
692 void AudioContext::derefUnfinishedSourceNodes()
693 {
694     ASSERT(isMainThread() && isAudioThreadFinished());
695     for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
696         m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
697
698     m_referencedNodes.clear();
699 }
700
// Acquires the graph mutex (recursive in effect: a no-op if this thread already
// owns it). |mustReleaseLock| tells the caller whether it must call unlock().
void AudioContext::lock(bool& mustReleaseLock)
{
    // Don't allow regular lock in real-time audio thread.
    ASSERT(isMainThread());

    ThreadIdentifier thisThread = currentThread();

    if (thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = thisThread;
        mustReleaseLock = true;
    }
}

// Non-blocking lock attempt for the real-time audio thread. Returns whether the
// lock is held; |mustReleaseLock| tells the caller whether unlock() is needed.
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();

    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread || isAudioThreadFinished());

    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();

        if (hasLock)
            m_graphOwnerThread = thisThread;

        mustReleaseLock = hasLock;
    }

    return hasLock;
}

// Releases the graph mutex; only the recorded owner thread may call this.
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);

    // Clear ownership before unlocking so no thread sees a stale owner while holding the mutex.
    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}

// True when called from the rendering (audio) thread.
bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}

// True when the calling thread currently holds the graph lock.
bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}

// Queues a finish-deref that could not be completed immediately; drained in
// handleDeferredFinishDerefs() during post-render processing.
void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(node);
}
775
// Audio-thread hook run before each render quantum: best-effort sync of graph
// state changes made on the main thread.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}

// Audio-thread hook run after each render quantum: deferred derefs, node
// cleanup scheduling, and the same dirty-state fixups as the pre-render pass.
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}

// Completes derefs queued by addDeferredFinishDeref(); audio thread, graph lock held.
void AudioContext::handleDeferredFinishDerefs()
{
    ASSERT(isAudioThread() && isGraphOwner());
    for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
        AudioNode* node = m_deferredFinishDerefList[i];
        node->finishDeref(AudioNode::RefTypeConnection);
    }

    m_deferredFinishDerefList.clear();
}
835
// Queues |node| for deletion. While the audio thread is alive the node goes to
// the "marked" list (picked up by scheduleNodeDeletion()); afterwards it goes
// straight to the to-delete list.
void AudioContext::markForDeletion(AudioNode* node)
{
    ASSERT(isGraphOwner());

    if (isAudioThreadFinished())
        m_nodesToDelete.append(node);
    else
        m_nodesMarkedForDeletion.append(node);

    // This is probably the best time for us to remove the node from automatic pull list,
    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
    // modify m_renderingAutomaticPullNodes.
    removeAutomaticPullNode(node);
}

// Moves marked nodes to the to-delete list and schedules deleteMarkedNodes() on
// the main thread (deletion must not happen on the real-time thread).
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = m_isInitialized && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;

    // Make sure to call deleteMarkedNodes() on main thread.
    // m_isDeletionScheduled guards against queueing multiple main-thread tasks.
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();

        m_isDeletionScheduled = true;

        // Keep the context alive until the deferred deletion runs.
        RefPtr<AudioContext> strongThis(this);
        callOnMainThread([strongThis] {
            strongThis->deleteMarkedNodes();
        });
    }
}
872
// Main-thread deletion of nodes queued by markForDeletion()/scheduleNodeDeletion().
// Purges each node's inputs/outputs from the dirty sets before deleting it, so
// the dirty-handling code never touches freed memory.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());

    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    Ref<AudioContext> protect(*this);
    {
        AutoLocker locker(*this);

        while (m_nodesToDelete.size()) {
            AudioNode* node = m_nodesToDelete.takeLast();

            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));

            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));

            // Finally, delete it.
            delete node;
        }
        m_isDeletionScheduled = false;
    }
}
901
// Records a summing junction whose rendering state needs refreshing; processed
// in handleDirtyAudioSummingJunctions(). Caller must hold the graph lock.
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());
    m_dirtySummingJunctions.add(summingJunction);
}

// Removes a junction from the dirty set (e.g. when it is going away).
// Takes the graph lock itself since it is called from the main thread.
void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(*this);
    m_dirtySummingJunctions.remove(summingJunction);
}

// Records a node output whose rendering state needs refreshing; processed in
// handleDirtyAudioNodeOutputs(). Caller must hold the graph lock.
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}
920
921 void AudioContext::handleDirtyAudioSummingJunctions()
922 {
923     ASSERT(isGraphOwner());    
924
925     for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
926         (*i)->updateRenderingState();
927
928     m_dirtySummingJunctions.clear();
929 }
930
931 void AudioContext::handleDirtyAudioNodeOutputs()
932 {
933     ASSERT(isGraphOwner());    
934
935     for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
936         (*i)->updateRenderingState();
937
938     m_dirtyAudioNodeOutputs.clear();
939 }
940
941 void AudioContext::addAutomaticPullNode(AudioNode* node)
942 {
943     ASSERT(isGraphOwner());
944
945     if (m_automaticPullNodes.add(node).isNewEntry)
946         m_automaticPullNodesNeedUpdating = true;
947 }
948
949 void AudioContext::removeAutomaticPullNode(AudioNode* node)
950 {
951     ASSERT(isGraphOwner());
952
953     if (m_automaticPullNodes.remove(node))
954         m_automaticPullNodesNeedUpdating = true;
955 }
956
957 void AudioContext::updateAutomaticPullNodes()
958 {
959     ASSERT(isGraphOwner());
960
961     if (m_automaticPullNodesNeedUpdating) {
962         // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
963         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
964
965         unsigned j = 0;
966         for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
967             AudioNode* output = *i;
968             m_renderingAutomaticPullNodes[j] = output;
969         }
970
971         m_automaticPullNodesNeedUpdating = false;
972     }
973 }
974
975 void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
976 {
977     ASSERT(isAudioThread());
978
979     for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
980         m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
981 }
982
983 ScriptExecutionContext* AudioContext::scriptExecutionContext() const
984 {
985     return m_isStopScheduled ? 0 : ActiveDOMObject::scriptExecutionContext();
986 }
987
988 void AudioContext::nodeWillBeginPlayback()
989 {
990     // Called by scheduled AudioNodes when clients schedule their start times.
991     // Prior to the introduction of suspend(), resume(), and stop(), starting
992     // a scheduled AudioNode would remove the user-gesture restriction, if present,
993     // and would thus unmute the context. Now that AudioContext stays in the
994     // "suspended" state if a user-gesture restriction is present, starting a
995     // schedule AudioNode should set the state to "running", but only if the
996     // user-gesture restriction is set.
997     if (userGestureRequiredForAudioStart())
998         startRendering();
999 }
1000
1001 bool AudioContext::willBeginPlayback()
1002 {
1003     if (userGestureRequiredForAudioStart()) {
1004         if (!ScriptController::processingUserGesture())
1005             return false;
1006         removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
1007     }
1008
1009     if (pageConsentRequiredForAudioStart()) {
1010         Page* page = document()->page();
1011         if (page && !page->canStartMedia()) {
1012             document()->addMediaCanStartListener(this);
1013             return false;
1014         }
1015         removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
1016     }
1017
1018     return m_mediaSession->clientWillBeginPlayback();
1019 }
1020
1021 bool AudioContext::willPausePlayback()
1022 {
1023     if (userGestureRequiredForAudioStart()) {
1024         if (!ScriptController::processingUserGesture())
1025             return false;
1026         removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
1027     }
1028
1029     if (pageConsentRequiredForAudioStart()) {
1030         Page* page = document()->page();
1031         if (page && !page->canStartMedia()) {
1032             document()->addMediaCanStartListener(this);
1033             return false;
1034         }
1035         removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
1036     }
1037     
1038     return m_mediaSession->clientWillPausePlayback();
1039 }
1040
1041 void AudioContext::startRendering()
1042 {
1043     if (!willBeginPlayback())
1044         return;
1045
1046     destination()->startRendering();
1047     setState(State::Running);
1048 }
1049
1050 void AudioContext::mediaCanStart()
1051 {
1052     removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
1053 }
1054
1055 MediaProducer::MediaStateFlags AudioContext::mediaState() const
1056 {
1057     if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
1058         return MediaProducer::IsPlayingAudio;
1059
1060     return MediaProducer::IsNotPlaying;
1061 }
1062
1063 void AudioContext::pageMutedStateDidChange()
1064 {
1065     if (m_destinationNode && document()->page())
1066         m_destinationNode->setMuted(document()->page()->isMuted());
1067 }
1068
1069 void AudioContext::isPlayingAudioDidChange()
1070 {
1071     document()->updateIsPlayingMedia();
1072 }
1073
1074 void AudioContext::fireCompletionEvent()
1075 {
1076     ASSERT(isMainThread());
1077     if (!isMainThread())
1078         return;
1079         
1080     AudioBuffer* renderedBuffer = m_renderTarget.get();
1081     setState(State::Closed);
1082
1083     ASSERT(renderedBuffer);
1084     if (!renderedBuffer)
1085         return;
1086
1087     // Avoid firing the event if the document has already gone away.
1088     if (scriptExecutionContext()) {
1089         // Call the offline rendering completion event listener.
1090         m_eventQueue->enqueueEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
1091     }
1092 }
1093
void AudioContext::incrementActiveSourceCount()
{
    // Bump the bookkeeping count of currently-active source nodes.
    // NOTE(review): plain ++ on a non-atomic member; if this can be called
    // concurrently with decrementActiveSourceCount() from another thread it
    // would race — confirm which threads call it.
    ++m_activeSourceCount;
}
1098
void AudioContext::decrementActiveSourceCount()
{
    // Drop the bookkeeping count of currently-active source nodes.
    // NOTE(review): non-atomic decrement; see the matching caveat on
    // incrementActiveSourceCount() — confirm the calling threads.
    --m_activeSourceCount;
}
1103
void AudioContext::suspendContext(std::function<void()> successCallback, FailureCallback failureCallback)
{
    // Transition the context toward the "suspended" state. successCallback
    // runs once the transition completes (possibly asynchronously);
    // failureCallback is invoked when suspension is impossible.
    ASSERT(successCallback);
    ASSERT(failureCallback);

    // Offline contexts cannot be suspended.
    if (isOfflineContext()) {
        failureCallback(INVALID_STATE_ERR);
        return;
    }

    // Already suspended: report success immediately.
    if (m_state == State::Suspended) {
        successCallback();
        return;
    }

    // Closed or interrupted contexts (or one without a destination node)
    // cannot make this transition.
    if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
        failureCallback(0);
        return;
    }

    // Queue successCallback to fire when the state actually becomes Suspended
    // (presumably delivered via setState() — confirm addReaction()'s contract).
    addReaction(State::Suspended, successCallback);

    // If the media session refuses to pause right now, the queued reaction
    // above still fires whenever the suspended state is eventually reached.
    if (!willPausePlayback())
        return;

    lazyInitialize();

    // Keep |this| alive across the asynchronous suspend of the destination node.
    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->suspend([strongThis] {
        strongThis->setState(State::Suspended);
    });
}
1136
void AudioContext::resumeContext(std::function<void()> successCallback, FailureCallback failureCallback)
{
    // Transition the context toward the "running" state. successCallback runs
    // once the transition completes (possibly asynchronously); failureCallback
    // is invoked when resuming is impossible.
    ASSERT(successCallback);
    ASSERT(failureCallback);

    // Offline contexts cannot be resumed through this API.
    if (isOfflineContext()) {
        failureCallback(INVALID_STATE_ERR);
        return;
    }

    // Already running: report success immediately.
    if (m_state == State::Running) {
        successCallback();
        return;
    }

    // A closed context (or one without a destination node) can never run again.
    if (m_state == State::Closed || !m_destinationNode) {
        failureCallback(0);
        return;
    }

    // Queue successCallback to fire when the state actually becomes Running
    // (presumably delivered via setState() — confirm addReaction()'s contract).
    addReaction(State::Running, successCallback);

    // Gesture/consent/media-session gates may veto starting now; the queued
    // reaction above still fires if the running state is reached later.
    if (!willBeginPlayback())
        return;

    lazyInitialize();

    // Keep |this| alive across the asynchronous resume of the destination node.
    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->resume([strongThis] {
        strongThis->setState(State::Running);
    });
}
1169
1170 void AudioContext::closeContext(std::function<void()> successCallback, FailureCallback failureCallback)
1171 {
1172     ASSERT(successCallback);
1173     ASSERT(failureCallback);
1174
1175     if (isOfflineContext()) {
1176         failureCallback(INVALID_STATE_ERR);
1177         return;
1178     }
1179
1180     if (m_state == State::Closed || !m_destinationNode) {
1181         successCallback();
1182         return;
1183     }
1184
1185     addReaction(State::Closed, successCallback);
1186
1187     lazyInitialize();
1188
1189     RefPtr<AudioContext> strongThis(this);
1190     m_destinationNode->close([strongThis, successCallback] {
1191         strongThis->setState(State::Closed);
1192         strongThis->uninitialize();
1193     });
1194 }
1195
1196
void AudioContext::suspendPlayback()
{
    // Halts rendering without a client callback. NOTE(review): this consults
    // m_mediaSession->state(), so it appears to be driven by platform media
    // session interruptions — confirm against the PlatformMediaSession client
    // interface.
    if (!m_destinationNode || m_state == State::Closed)
        return;

    if (m_state == State::Suspended) {
        // Already suspended; if the media session is interrupted, reflect
        // that in the context state instead.
        if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
            setState(State::Interrupted);
        return;
    }

    lazyInitialize();

    // Keep |this| alive across the asynchronous suspend; the final state is
    // chosen by whether the media session is interrupted once it completes.
    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->suspend([strongThis] {
        bool interrupted = strongThis->m_mediaSession->state() == PlatformMediaSession::Interrupted;
        strongThis->setState(interrupted ? State::Interrupted : State::Suspended);
    });
}
1216
void AudioContext::mayResumePlayback(bool shouldResume)
{
    // Invoked when a suspension may be lifted. With shouldResume == false the
    // context merely settles into Suspended (e.g. leaving Interrupted); with
    // true, a full resume is attempted subject to the willBeginPlayback() gates.
    if (!m_destinationNode || m_state == State::Closed || m_state == State::Running)
        return;

    if (!shouldResume) {
        setState(State::Suspended);
        return;
    }

    // Gesture/consent/media-session checks may still veto the resume.
    if (!willBeginPlayback())
        return;

    lazyInitialize();

    // Keep |this| alive across the asynchronous resume of the destination node.
    RefPtr<AudioContext> strongThis(this);
    m_destinationNode->resume([strongThis] {
        strongThis->setState(State::Running);
    });
}
1237
1238
1239 } // namespace WebCore
1240
1241 #endif // ENABLE(WEB_AUDIO)