/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "AudioBufferSourceNode.h"

#include "AudioContext.h"
#include "AudioNodeOutput.h"

#include <algorithm>
#include <wtf/MathExtras.h>

using namespace std;

namespace WebCore {

const double DefaultGrainDuration = 0.020; // 20ms
const double UnknownTime = -1;
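
// UnknownTime marks m_endTime as "not yet scheduled": it keeps that value until noteOff() sets a real end time.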

PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, double sampleRate)
{
    return adoptRef(new AudioBufferSourceNode(context, sampleRate));
}

AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, double sampleRate)
    : AudioSourceNode(context, sampleRate)
    , m_hasFinished(false)
    , m_endTime(UnknownTime)
    , m_schedulingFrameDelay(0)
    , m_grainDuration(DefaultGrainDuration)
    , m_grainFrameCount(0)
{
    setType(NodeTypeAudioBufferSource);

    m_gain = AudioGain::create("gain", 1.0, 0.0, 1.0);
    m_playbackRate = AudioParam::create("playbackRate", 1.0, 0.0, AudioResampler::MaxRate);

    m_gain->setContext(context);
    m_playbackRate->setContext(context);

    // Default to mono. A call to setBuffer() will set the number of output channels to that of the buffer.
    addOutput(adoptPtr(new AudioNodeOutput(this, 1)));

    initialize();
}

AudioBufferSourceNode::~AudioBufferSourceNode()
{
    uninitialize();
}

void AudioBufferSourceNode::process(size_t framesToProcess)
{
    AudioBus* outputBus = output(0)->bus();

    if (!isInitialized()) {
        outputBus->zero();
        return;
    }

    // The audio thread can't block on this lock, so we call tryLock() instead.
    // Careful - this is a tryLock() and not an autolocker, so we must unlock() before every return.
    if (m_processLock.tryLock()) {
        // Check if it's time to start playing.
        double sampleRate = this->sampleRate();
        double pitchRate = totalPitchRate();
        double quantumStartTime = context()->currentTime();
        double quantumEndTime = quantumStartTime + framesToProcess / sampleRate;
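
        // quantumStartTime and quantumEndTime bracket, in seconds of context time, the block of
        // framesToProcess sample-frames rendered by this call.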

        // If we know the end time and it's already passed, then don't bother doing any more rendering this cycle.
        if (m_endTime != UnknownTime && m_endTime <= quantumStartTime) {
            outputBus->zero();
            m_processLock.unlock();
            return;
        }

        if (!m_isPlaying || m_hasFinished || !buffer() || m_startTime >= quantumEndTime) {
            // FIXME: can optimize here by propagating silent hint instead of forcing the whole chain to process silence.
            outputBus->zero();
            m_processLock.unlock();
            return;
        }

        // Handle sample-accurate scheduling so that buffer playback will happen at a very precise time.
        m_schedulingFrameDelay = 0;
        if (m_startTime >= quantumStartTime) {
            // m_schedulingFrameDelay is set here only the very first render quantum (because of above check: m_startTime >= quantumEndTime)
            // So: quantumStartTime <= m_startTime < quantumEndTime
            ASSERT(m_startTime < quantumEndTime);

            double startTimeInQuantum = m_startTime - quantumStartTime;
            double startFrameInQuantum = startTimeInQuantum * sampleRate;

            // m_schedulingFrameDelay is used in provideInput(), so factor in the current playback pitch rate.
            m_schedulingFrameDelay = static_cast<int>(pitchRate * startFrameInQuantum);
        }
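
        // Example: with a 44.1 kHz context and a 128-frame render quantum, a start time landing 64 frames
        // (~1.45 ms) into the quantum at pitchRate == 1 gives m_schedulingFrameDelay == 64, so provideInput()
        // will emit 64 frames of silence before the first buffer sample.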

        // FIXME: optimization opportunity:
        // With a bit of work, it should be possible to avoid going through the resampler completely when the pitchRate == 1,
        // especially if the pitchRate has never deviated from 1 in the past.

        // Read the samples through the pitch resampler. Our provideInput() method will be called by the resampler.
        m_resampler.setRate(pitchRate);
        m_resampler.process(this, outputBus, framesToProcess);

        // Apply the gain (in-place) to the output bus.
        double totalGain = gain()->value() * m_buffer->gain();
        outputBus->copyWithGainFrom(*outputBus, &m_lastGain, totalGain);
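
        // m_lastGain is passed by address so the change from the previous gain value can be smoothed
        // across this quantum (de-zippering) rather than applied as an instantaneous step.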

        // If the end time is somewhere in the middle of this time quantum, then simply zero out the
        // frames starting at the end time.
        if (m_endTime != UnknownTime && m_endTime >= quantumStartTime && m_endTime < quantumEndTime) {
            unsigned zeroStartFrame = (m_endTime - quantumStartTime) * sampleRate;
            unsigned framesToZero = framesToProcess - zeroStartFrame;
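
            // Example: if the scheduled end time falls 2 ms before the end of a 44.1 kHz quantum, roughly
            // the last 88 sample-frames rendered above are cleared to silence here.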

            bool isSafe = zeroStartFrame < framesToProcess && framesToZero <= framesToProcess && zeroStartFrame + framesToZero <= framesToProcess;
            ASSERT(isSafe);

            if (isSafe) {
                for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
                    memset(outputBus->channel(i)->data() + zeroStartFrame, 0, sizeof(float) * framesToZero);
            }
        }

        m_processLock.unlock();
        return;
    }

    // Too bad - the tryLock() failed. We must be in the middle of changing buffers and were already outputting silence anyway.
    outputBus->zero();
}

// The resampler calls us back here to get the input samples from our buffer.
void AudioBufferSourceNode::provideInput(AudioBus* bus, size_t numberOfFrames)
{
    ASSERT(context()->isAudioThread());

    // Basic sanity checking
    if (!bus || !buffer())
        return;

    unsigned numberOfChannels = this->numberOfChannels();
    unsigned busNumberOfChannels = bus->numberOfChannels();

    // FIXME: we can add support for sources with more than two channels, but this is not a common case.
    bool channelCountGood = numberOfChannels == busNumberOfChannels && (numberOfChannels == 1 || numberOfChannels == 2);
    ASSERT(channelCountGood);
    if (!channelCountGood)
        return;

    // Get the destination pointers.
    float* destinationL = bus->channel(0)->data();
    ASSERT(destinationL);
    float* destinationR = (numberOfChannels < 2) ? 0 : bus->channel(1)->data();

    size_t bufferLength = buffer()->length();
    double bufferSampleRate = buffer()->sampleRate();

    // Calculate the start and end frames in our buffer that we want to play.
    // If m_isGrain is true, then we will be playing a portion of the total buffer.
    unsigned startFrame = m_isGrain ? static_cast<unsigned>(m_grainOffset * bufferSampleRate) : 0;
    unsigned endFrame = m_isGrain ? static_cast<unsigned>(startFrame + m_grainDuration * bufferSampleRate) : bufferLength;
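
    // Example: a 20 ms grain starting 1.5 s into a 44.1 kHz buffer spans frames 66150 through 67031
    // (882 frames), computed against the buffer's sample-rate rather than the context's.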

    // This is a HACK to allow for HRTF tail-time - avoids glitch at end.
    // FIXME: implement tailTime for each AudioNode for a more general solution to this problem.

    // Do some sanity checking.
    if (startFrame >= bufferLength)
        startFrame = !bufferLength ? 0 : bufferLength - 1;
    if (endFrame > bufferLength)
        endFrame = bufferLength;
    if (m_readIndex >= endFrame)
        m_readIndex = startFrame; // reset to start

    int framesToProcess = numberOfFrames;

    // Handle sample-accurate scheduling so that we play the buffer at a very precise time.
    // m_schedulingFrameDelay will only be non-zero the very first time that provideInput() is called, which corresponds
    // with the very start of the buffer playback.
    if (m_schedulingFrameDelay > 0) {
        ASSERT(m_schedulingFrameDelay <= framesToProcess);
        if (m_schedulingFrameDelay <= framesToProcess) {
            // Generate silence for the initial portion of the destination.
            memset(destinationL, 0, sizeof(float) * m_schedulingFrameDelay);
            destinationL += m_schedulingFrameDelay;

            if (destinationR) {
                memset(destinationR, 0, sizeof(float) * m_schedulingFrameDelay);
                destinationR += m_schedulingFrameDelay;
            }

            // Since we just generated silence for the initial portion, we have fewer frames to provide.
            framesToProcess -= m_schedulingFrameDelay;
        }
    }

    // We have to generate a certain number of output sample-frames, but we need to handle the case where we wrap around
    // from the end of the buffer to the start if playing back with looping and also the case where we simply reach the
    // end of the sample data, but haven't yet rendered numberOfFrames worth of output.
    while (framesToProcess > 0) {
        ASSERT(m_readIndex <= endFrame);
        if (m_readIndex > endFrame)
            break;

        // Figure out how many frames we can process this time.
        int framesAvailable = endFrame - m_readIndex;
        int framesThisTime = min(framesToProcess, framesAvailable);

        // Create the destination bus for the part of the destination we're processing this time.
        AudioBus currentDestinationBus(busNumberOfChannels, framesThisTime, false);
        currentDestinationBus.setChannelMemory(0, destinationL, framesThisTime);
        if (busNumberOfChannels > 1)
            currentDestinationBus.setChannelMemory(1, destinationR, framesThisTime);
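
        // The temporary bus's channels point directly at the destination memory set above, so
        // readFromBuffer() writes its output in place rather than into separate storage.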

        // Generate output from the buffer.
        readFromBuffer(&currentDestinationBus, framesThisTime);

        // Update the destination pointers.
        destinationL += framesThisTime;
        if (busNumberOfChannels > 1)
            destinationR += framesThisTime;

        framesToProcess -= framesThisTime;

        // Handle the case where we reach the end of the part of the sample data we're supposed to play for the buffer.
        if (m_readIndex >= endFrame) {
            m_readIndex = startFrame;
            m_grainFrameCount = 0;

            // If we're not looping, then stop playing when we get to the end.
            if (!looping()) {
                if (framesToProcess > 0) {
                    // We're not looping and we've reached the end of the sample data, but we still need to provide more output,
                    // so generate silence for the remaining.
                    memset(destinationL, 0, sizeof(float) * framesToProcess);

                    if (destinationR)
                        memset(destinationR, 0, sizeof(float) * framesToProcess);
                }

                finish();
                break;
            }
        }
    }
}

void AudioBufferSourceNode::readFromBuffer(AudioBus* destinationBus, size_t framesToProcess)
{
    bool isBusGood = destinationBus && destinationBus->length() == framesToProcess && destinationBus->numberOfChannels() == numberOfChannels();
    ASSERT(isBusGood);
    if (!isBusGood)
        return;

    unsigned numberOfChannels = this->numberOfChannels();
    // FIXME: we can add support for sources with more than two channels, but this is not a common case.
    bool channelCountGood = numberOfChannels == 1 || numberOfChannels == 2;
    ASSERT(channelCountGood);
    if (!channelCountGood)
        return;

    // Get pointers to the start of the sample buffer.
    float* sourceL = m_buffer->getChannelData(0)->data();
    float* sourceR = m_buffer->numberOfChannels() == 2 ? m_buffer->getChannelData(1)->data() : 0;

    // Sanity check buffer access.
    bool isSourceGood = sourceL && (numberOfChannels == 1 || sourceR) && m_readIndex + framesToProcess <= m_buffer->length();
    ASSERT(isSourceGood);
    if (!isSourceGood)
        return;

    // Offset the pointers to the current read position in the sample buffer.
    sourceL += m_readIndex;
    sourceR += m_readIndex;

    // Get pointers to the destination.
    float* destinationL = destinationBus->channel(0)->data();
    float* destinationR = numberOfChannels == 2 ? destinationBus->channel(1)->data() : 0;
    bool isDestinationGood = destinationL && (numberOfChannels == 1 || destinationR);
    ASSERT(isDestinationGood);
    if (!isDestinationGood)
        return;

    if (m_isGrain)
        readFromBufferWithGrainEnvelope(sourceL, sourceR, destinationL, destinationR, framesToProcess);
    else {
        // Simply copy the data from the source buffer to the destination.
        memcpy(destinationL, sourceL, sizeof(float) * framesToProcess);
        if (numberOfChannels == 2)
            memcpy(destinationR, sourceR, sizeof(float) * framesToProcess);
    }

    // Advance the buffer's read index.
    m_readIndex += framesToProcess;
}

void AudioBufferSourceNode::readFromBufferWithGrainEnvelope(float* sourceL, float* sourceR, float* destinationL, float* destinationR, size_t framesToProcess)
{
    ASSERT(sourceL && destinationL);
    if (!sourceL || !destinationL)
        return;

    int grainFrameLength = static_cast<int>(m_grainDuration * m_buffer->sampleRate());
    bool isStereo = sourceR && destinationR;
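
    // The envelope below is a half-cycle sine: sin(pi * x) rises from 0 at the start of the grain to 1 at
    // its midpoint and back to 0 at its end, fading the grain in and out to avoid clicks at the edges.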

    int n = framesToProcess;
    while (n--) {
        // Apply the grain envelope.
        float x = static_cast<float>(m_grainFrameCount) / static_cast<float>(grainFrameLength);
        ++m_grainFrameCount;

        float grainEnvelope = sinf(piFloat * x);

        *destinationL++ = grainEnvelope * *sourceL++;

        if (isStereo)
            *destinationR++ = grainEnvelope * *sourceR++;
    }
}

void AudioBufferSourceNode::reset()
{
    m_grainFrameCount = 0;
    m_lastGain = gain()->value();
}

void AudioBufferSourceNode::finish()
{
    if (!m_hasFinished) {
        // Let the context dereference this AudioNode.
        context()->notifyNodeFinishedProcessing(this);
        m_hasFinished = true;
    }
}

void AudioBufferSourceNode::setBuffer(AudioBuffer* buffer)
{
    ASSERT(isMainThread());

    // The context must be locked since changing the buffer can re-configure the number of channels that are output.
    AudioContext::AutoLocker contextLocker(context());

    // This synchronizes with process().
    MutexLocker processLocker(m_processLock);

    if (buffer) {
        // Do any necessary re-configuration to the buffer's number of channels.
        unsigned numberOfChannels = buffer->numberOfChannels();
        m_resampler.configureChannels(numberOfChannels);
        output(0)->setNumberOfChannels(numberOfChannels);
    }

    m_readIndex = 0;
    m_buffer = buffer;
}

unsigned AudioBufferSourceNode::numberOfChannels()
{
    return output(0)->numberOfChannels();
}

void AudioBufferSourceNode::noteOn(double when)
{
    ASSERT(isMainThread());

    m_isGrain = false;
    m_startTime = when;
    m_readIndex = 0;
    m_isPlaying = true;
}

void AudioBufferSourceNode::noteGrainOn(double when, double grainOffset, double grainDuration)
{
    ASSERT(isMainThread());

    if (!buffer())
        return;

    // Do sanity checking of grain parameters versus buffer size.
    double bufferDuration = buffer()->duration();

    if (grainDuration > bufferDuration)
        return; // FIXME: maybe should throw exception - consider in specification.

    double maxGrainOffset = bufferDuration - grainDuration;
    maxGrainOffset = max(0.0, maxGrainOffset);

    grainOffset = max(0.0, grainOffset);
    grainOffset = min(maxGrainOffset, grainOffset);
    m_grainOffset = grainOffset;
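
    // Example: a 2 second grain requested at an offset of 9.5 seconds into a 10 second buffer is pulled
    // back to an offset of 8 seconds so the entire grain still fits inside the buffer.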

    m_grainDuration = grainDuration;
    m_grainFrameCount = 0;
    m_isGrain = true;
    m_startTime = when;
    m_readIndex = static_cast<int>(m_grainOffset * buffer()->sampleRate());
    m_isPlaying = true;
}

void AudioBufferSourceNode::noteOff(double when)
{
    ASSERT(isMainThread());

    when = max(0.0, when);
    m_endTime = when;
}

double AudioBufferSourceNode::totalPitchRate()
{
    double dopplerRate = 1.0;
    if (m_pannerNode.get())
        dopplerRate = m_pannerNode->dopplerRate();

    // Incorporate buffer's sample-rate versus AudioContext's sample-rate.
    // Normally it's not an issue because buffers are loaded at the AudioContext's sample-rate, but we can handle it in any case.
    double sampleRateFactor = 1.0;
    if (buffer())
        sampleRateFactor = buffer()->sampleRate() / sampleRate();

    double basePitchRate = playbackRate()->value();

    double totalRate = dopplerRate * sampleRateFactor * basePitchRate;
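
    // Example: a 22.05 kHz buffer played in a 44.1 kHz context with playbackRate == 2 and no doppler
    // shift gives totalRate == 0.5 * 2 == 1, so the resampler steps through the buffer one source frame
    // per output frame.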

    // Sanity check the total rate. It's very important that the resampler not get any bad rate values.
    totalRate = max(0.0, totalRate);
    totalRate = min(AudioResampler::MaxRate, totalRate);

    bool isTotalRateValid = !isnan(totalRate) && !isinf(totalRate);
    ASSERT(isTotalRateValid);
    if (!isTotalRateValid)
        totalRate = 1.0;

    return totalRate;
}

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)