Make WebAudio API const-correct.
author: jer.noble@apple.com <jer.noble@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Thu, 19 Jan 2012 20:01:09 +0000 (20:01 +0000)
committer: jer.noble@apple.com <jer.noble@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Thu, 19 Jan 2012 20:01:09 +0000 (20:01 +0000)
https://bugs.webkit.org/show_bug.cgi?id=76573

Reviewed by Daniel Bates.

Source/WebCore:

No new tests; no net change in functionality, so covered by existing tests.

The non-const data() accessor was renamed mutableData() to expose const-correctness
bugs during compile time:
* platform/audio/AudioChannel.h:
(WebCore::AudioChannel::mutableData):

The following functions were made const correct:
* platform/audio/AudioArray.h:
(WebCore::AudioArray::copyToRange):
* platform/audio/AudioBus.h:
(WebCore::AudioBus::createBufferFromRange):
(WebCore::AudioBus::createBySampleRateConverting):
(WebCore::AudioBus::createByMixingToMono):
* platform/audio/FFTConvolver.cpp:
(WebCore::FFTConvolver::process):
* platform/audio/FFTConvolver.h:
* platform/audio/FFTFrame.cpp:
(WebCore::FFTFrame::doPaddedFFT):
(WebCore::FFTFrame::doFFT):
* platform/audio/FFTFrame.h:
* platform/audio/ReverbConvolverStage.cpp:
(WebCore::ReverbConvolverStage::ReverbConvolverStage):
(WebCore::ReverbConvolverStage::process):
* platform/audio/ReverbConvolverStage.h:
* platform/audio/ReverbInputBuffer.cpp:
(WebCore::ReverbInputBuffer::write):
* platform/audio/ReverbInputBuffer.h:
* platform/audio/SincResampler.cpp:
(WebCore::SincResampler::process):
* platform/audio/SincResampler.h:
* platform/audio/ZeroPole.cpp:
(WebCore::ZeroPole::process):
* platform/audio/ZeroPole.h:
* platform/audio/AudioBus.cpp:
(WebCore::AudioBus::channelByType):
* platform/audio/AudioBus.h:
(WebCore::AudioBus::gain):
* platform/audio/AudioDSPKernelProcessor.cpp:
(WebCore::AudioDSPKernelProcessor::process):
* platform/audio/AudioDSPKernelProcessor.h:
* platform/audio/AudioProcessor.h:
* platform/audio/DynamicsCompressor.cpp:
(WebCore::DynamicsCompressor::process):
* platform/audio/DynamicsCompressor.h:
* platform/audio/DynamicsCompressorKernel.cpp:
(WebCore::DynamicsCompressorKernel::process):
* platform/audio/DynamicsCompressorKernel.h:
* platform/audio/EqualPowerPanner.cpp:
(WebCore::EqualPowerPanner::pan):
* platform/audio/EqualPowerPanner.h:
* platform/audio/HRTFElevation.h:
(WebCore::HRTFElevation::numberOfAzimuths):
* platform/audio/HRTFPanner.cpp:
(WebCore::HRTFPanner::pan):
* platform/audio/HRTFPanner.h:
* platform/audio/Panner.h:
* platform/audio/Reverb.cpp:
(WebCore::Reverb::process):
* platform/audio/Reverb.h:
* platform/audio/ReverbConvolver.cpp:
(WebCore::ReverbConvolver::process):
* platform/audio/ReverbConvolver.h:
* platform/audio/ffmpeg/FFTFrameFFMPEG.cpp:
(WebCore::FFTFrame::doFFT):
* platform/audio/mkl/FFTFrameMKL.cpp:
(WebCore::FFTFrame::doFFT):

The following functions were modified to use the renamed mutableData() accessor:
* platform/audio/AudioBus.cpp:
(WebCore::AudioBus::processWithGainFromMonoStereo):
(WebCore::AudioBus::copyWithSampleAccurateGainValuesFrom):
* platform/audio/AudioChannel.cpp:
(WebCore::AudioChannel::scale):
(WebCore::AudioChannel::copyFrom):
(WebCore::AudioChannel::copyFromRange):
(WebCore::AudioChannel::sumFrom):
* platform/audio/AudioDSPKernelProcessor.cpp:
(WebCore::AudioDSPKernelProcessor::process):
* platform/audio/AudioResampler.cpp:
(WebCore::AudioResampler::process):
* platform/audio/DynamicsCompressor.cpp:
(WebCore::DynamicsCompressor::process):
* platform/audio/EqualPowerPanner.cpp:
(WebCore::EqualPowerPanner::pan):
* platform/audio/HRTFKernel.cpp:
(WebCore::extractAverageGroupDelay):
(WebCore::HRTFKernel::HRTFKernel):
(WebCore::HRTFKernel::createImpulseResponse):
* platform/audio/HRTFPanner.cpp:
(WebCore::HRTFPanner::pan):
* platform/audio/MultiChannelResampler.cpp:
(WebCore::MultiChannelResampler::process):
* platform/audio/Reverb.cpp:
(WebCore::Reverb::process):
* platform/audio/ReverbConvolver.cpp:
(WebCore::ReverbConvolver::ReverbConvolver):
(WebCore::ReverbConvolver::process):
* platform/audio/mac/AudioFileReaderMac.cpp:
(WebCore::AudioFileReader::createBus):
* platform/audio/mac/FFTFrameMac.cpp:
(WebCore::FFTFrame::doFFT):
* webaudio/AudioBufferSourceNode.cpp:
(WebCore::AudioBufferSourceNode::process):
(WebCore::AudioBufferSourceNode::renderFromBuffer):
* webaudio/BiquadProcessor.cpp:
(WebCore::BiquadProcessor::process):
* webaudio/JavaScriptAudioNode.cpp:
(WebCore::JavaScriptAudioNode::process):
* webaudio/OfflineAudioDestinationNode.cpp:
(WebCore::OfflineAudioDestinationNode::render):
* webaudio/RealtimeAnalyser.cpp:
(WebCore::RealtimeAnalyser::writeInput):
* webaudio/WaveShaperProcessor.cpp:
(WebCore::WaveShaperProcessor::process):

Source/WebKit/chromium:

The following functions were modified to use the renamed mutableData() accessor:
* src/AudioDestinationChromium.cpp:
(WebCore::AudioDestinationChromium::FIFO::fillBuffer):
(WebCore::AudioDestinationChromium::FIFO::consume):
* src/WebAudioBus.cpp:
(WebCore::WebAudioBus::channelData):
* src/WebMediaPlayerClientImpl.cpp:
(WebKit::WebMediaPlayerClientImpl::AudioSourceProviderImpl::provideInput):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@105431 268f45cc-cd09-0410-ab3c-d52691b4dbfc

54 files changed:
Source/WebCore/ChangeLog
Source/WebCore/platform/audio/AudioArray.h
Source/WebCore/platform/audio/AudioBus.cpp
Source/WebCore/platform/audio/AudioBus.h
Source/WebCore/platform/audio/AudioChannel.cpp
Source/WebCore/platform/audio/AudioChannel.h
Source/WebCore/platform/audio/AudioDSPKernelProcessor.cpp
Source/WebCore/platform/audio/AudioDSPKernelProcessor.h
Source/WebCore/platform/audio/AudioProcessor.h
Source/WebCore/platform/audio/AudioResampler.cpp
Source/WebCore/platform/audio/DynamicsCompressor.cpp
Source/WebCore/platform/audio/DynamicsCompressor.h
Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp
Source/WebCore/platform/audio/DynamicsCompressorKernel.h
Source/WebCore/platform/audio/EqualPowerPanner.cpp
Source/WebCore/platform/audio/EqualPowerPanner.h
Source/WebCore/platform/audio/FFTConvolver.cpp
Source/WebCore/platform/audio/FFTConvolver.h
Source/WebCore/platform/audio/FFTFrame.cpp
Source/WebCore/platform/audio/FFTFrame.h
Source/WebCore/platform/audio/HRTFElevation.h
Source/WebCore/platform/audio/HRTFKernel.cpp
Source/WebCore/platform/audio/HRTFPanner.cpp
Source/WebCore/platform/audio/HRTFPanner.h
Source/WebCore/platform/audio/MultiChannelResampler.cpp
Source/WebCore/platform/audio/Panner.h
Source/WebCore/platform/audio/Reverb.cpp
Source/WebCore/platform/audio/Reverb.h
Source/WebCore/platform/audio/ReverbConvolver.cpp
Source/WebCore/platform/audio/ReverbConvolver.h
Source/WebCore/platform/audio/ReverbConvolverStage.cpp
Source/WebCore/platform/audio/ReverbConvolverStage.h
Source/WebCore/platform/audio/ReverbInputBuffer.cpp
Source/WebCore/platform/audio/ReverbInputBuffer.h
Source/WebCore/platform/audio/SincResampler.cpp
Source/WebCore/platform/audio/SincResampler.h
Source/WebCore/platform/audio/ZeroPole.cpp
Source/WebCore/platform/audio/ZeroPole.h
Source/WebCore/platform/audio/ffmpeg/FFTFrameFFMPEG.cpp
Source/WebCore/platform/audio/mac/AudioFileReaderMac.cpp
Source/WebCore/platform/audio/mac/FFTFrameMac.cpp
Source/WebCore/platform/audio/mkl/FFTFrameMKL.cpp
Source/WebCore/webaudio/AudioBufferSourceNode.cpp
Source/WebCore/webaudio/BiquadProcessor.cpp
Source/WebCore/webaudio/BiquadProcessor.h
Source/WebCore/webaudio/JavaScriptAudioNode.cpp
Source/WebCore/webaudio/OfflineAudioDestinationNode.cpp
Source/WebCore/webaudio/RealtimeAnalyser.cpp
Source/WebCore/webaudio/WaveShaperProcessor.cpp
Source/WebCore/webaudio/WaveShaperProcessor.h
Source/WebKit/chromium/ChangeLog
Source/WebKit/chromium/src/AudioDestinationChromium.cpp
Source/WebKit/chromium/src/WebAudioBus.cpp
Source/WebKit/chromium/src/WebMediaPlayerClientImpl.cpp

index bde6845..7b66da0 100755 (executable)
@@ -1,3 +1,126 @@
+2012-01-18  Jer Noble  <jer.noble@apple.com>
+
+        Make WebAudio API const-correct.
+        https://bugs.webkit.org/show_bug.cgi?id=76573
+
+        Reviewed by Daniel Bates.
+
+        No new tests; no net change in functionality, so covered by existing tests.
+
+        The non-const data() accessor was renamed mutableData() to expose const-correctness
+        bugs during compile time:
+        * platform/audio/AudioChannel.h:
+        (WebCore::AudioChannel::mutableData):
+
+        The following functions were made const correct:
+        * platform/audio/AudioArray.h:
+        (WebCore::AudioArray::copyToRange):
+        * platform/audio/AudioBus.h:
+        (WebCore::AudioBus::createBufferFromRange):
+        (WebCore::AudioBus::createBySampleRateConverting):
+        (WebCore::AudioBus::createByMixingToMono):
+        * platform/audio/FFTConvolver.cpp:
+        (WebCore::FFTConvolver::process):
+        * platform/audio/FFTConvolver.h:
+        * platform/audio/FFTFrame.cpp:
+        (WebCore::FFTFrame::doPaddedFFT):
+        (WebCore::FFTFrame::doFFT):
+        * platform/audio/FFTFrame.h:
+        * platform/audio/ReverbConvolverStage.cpp:
+        (WebCore::ReverbConvolverStage::ReverbConvolverStage):
+        (WebCore::ReverbConvolverStage::process):
+        * platform/audio/ReverbConvolverStage.h:
+        * platform/audio/ReverbInputBuffer.cpp:
+        (WebCore::ReverbInputBuffer::write):
+        * platform/audio/ReverbInputBuffer.h:
+        * platform/audio/SincResampler.cpp:
+        (WebCore::SincResampler::process):
+        * platform/audio/SincResampler.h:
+        * platform/audio/ZeroPole.cpp:
+        (WebCore::ZeroPole::process):
+        * platform/audio/ZeroPole.h:
+        * platform/audio/AudioBus.cpp:
+        (WebCore::AudioBus::channelByType):
+        * platform/audio/AudioBus.h:
+        (WebCore::AudioBus::gain):
+        * platform/audio/AudioDSPKernelProcessor.cpp:
+        (WebCore::AudioDSPKernelProcessor::process):
+        * platform/audio/AudioDSPKernelProcessor.h:
+        * platform/audio/AudioProcessor.h:
+        * platform/audio/DynamicsCompressor.cpp:
+        (WebCore::DynamicsCompressor::process):
+        * platform/audio/DynamicsCompressor.h:
+        * platform/audio/DynamicsCompressorKernel.cpp:
+        (WebCore::DynamicsCompressorKernel::process):
+        * platform/audio/DynamicsCompressorKernel.h:
+        * platform/audio/EqualPowerPanner.cpp:
+        (WebCore::EqualPowerPanner::pan):
+        * platform/audio/EqualPowerPanner.h:
+        * platform/audio/HRTFElevation.h:
+        (WebCore::HRTFElevation::numberOfAzimuths):
+        * platform/audio/HRTFPanner.cpp:
+        (WebCore::HRTFPanner::pan):
+        * platform/audio/HRTFPanner.h:
+        * platform/audio/Panner.h:
+        * platform/audio/Reverb.cpp:
+        (WebCore::Reverb::process):
+        * platform/audio/Reverb.h:
+        * platform/audio/ReverbConvolver.cpp:
+        (WebCore::ReverbConvolver::process):
+        * platform/audio/ReverbConvolver.h:
+        * platform/audio/ffmpeg/FFTFrameFFMPEG.cpp:
+        (WebCore::FFTFrame::doFFT):
+        * platform/audio/mkl/FFTFrameMKL.cpp:
+        (WebCore::FFTFrame::doFFT):
+
+        The following functions were modified to use the renamed mutableData() accessor:
+        * platform/audio/AudioBus.cpp:
+        (WebCore::AudioBus::processWithGainFromMonoStereo):
+        (WebCore::AudioBus::copyWithSampleAccurateGainValuesFrom):
+        * platform/audio/AudioChannel.cpp:
+        (WebCore::AudioChannel::scale):
+        (WebCore::AudioChannel::copyFrom):
+        (WebCore::AudioChannel::copyFromRange):
+        (WebCore::AudioChannel::sumFrom):
+        * platform/audio/AudioDSPKernelProcessor.cpp:
+        (WebCore::AudioDSPKernelProcessor::process):
+        * platform/audio/AudioResampler.cpp:
+        (WebCore::AudioResampler::process):
+        * platform/audio/DynamicsCompressor.cpp:
+        (WebCore::DynamicsCompressor::process):
+        * platform/audio/EqualPowerPanner.cpp:
+        (WebCore::EqualPowerPanner::pan):
+        * platform/audio/HRTFKernel.cpp:
+        (WebCore::extractAverageGroupDelay):
+        (WebCore::HRTFKernel::HRTFKernel):
+        (WebCore::HRTFKernel::createImpulseResponse):
+        * platform/audio/HRTFPanner.cpp:
+        (WebCore::HRTFPanner::pan):
+        * platform/audio/MultiChannelResampler.cpp:
+        (WebCore::MultiChannelResampler::process):
+        * platform/audio/Reverb.cpp:
+        (WebCore::Reverb::process):
+        * platform/audio/ReverbConvolver.cpp:
+        (WebCore::ReverbConvolver::ReverbConvolver):
+        (WebCore::ReverbConvolver::process):
+        * platform/audio/mac/AudioFileReaderMac.cpp:
+        (WebCore::AudioFileReader::createBus):
+        * platform/audio/mac/FFTFrameMac.cpp:
+        (WebCore::FFTFrame::doFFT):
+        * webaudio/AudioBufferSourceNode.cpp:
+        (WebCore::AudioBufferSourceNode::process):
+        (WebCore::AudioBufferSourceNode::renderFromBuffer):
+        * webaudio/BiquadProcessor.cpp:
+        (WebCore::BiquadProcessor::process):
+        * webaudio/JavaScriptAudioNode.cpp:
+        (WebCore::JavaScriptAudioNode::process):
+        * webaudio/OfflineAudioDestinationNode.cpp:
+        (WebCore::OfflineAudioDestinationNode::render):
+        * webaudio/RealtimeAnalyser.cpp:
+        (WebCore::RealtimeAnalyser::writeInput):
+        * webaudio/WaveShaperProcessor.cpp:
+        (WebCore::WaveShaperProcessor::process):
+
 2012-01-19  Vsevolod Vlasov  <vsevik@chromium.org>
 
         Unreviewed, inspector closure compilation fix.
index a14c950..7a2251b 100644 (file)
@@ -127,7 +127,7 @@ public:
         memset(this->data() + start, 0, sizeof(T) * (end - start));
     }
 
-    void copyToRange(T* sourceData, unsigned start, unsigned end)
+    void copyToRange(const T* sourceData, unsigned start, unsigned end)
     {
         bool isSafe = (start <= end) && (end <= this->size());
         ASSERT(isSafe);
index 30489ba..b8107b9 100644 (file)
@@ -130,6 +130,11 @@ AudioChannel* AudioBus::channelByType(unsigned channelType)
     return 0;
 }
 
+const AudioChannel* AudioBus::channelByType(unsigned type) const
+{
+    return const_cast<AudioBus*>(this)->channelByType(type);
+}
+
 // Returns true if the channel count and frame-size match.
 bool AudioBus::topologyMatches(const AudioBus& bus) const
 {
@@ -143,7 +148,7 @@ bool AudioBus::topologyMatches(const AudioBus& bus) const
     return true;
 }
 
-PassOwnPtr<AudioBus> AudioBus::createBufferFromRange(AudioBus* sourceBuffer, unsigned startFrame, unsigned endFrame)
+PassOwnPtr<AudioBus> AudioBus::createBufferFromRange(const AudioBus* sourceBuffer, unsigned startFrame, unsigned endFrame)
 {
     size_t numberOfSourceFrames = sourceBuffer->length();
     unsigned numberOfChannels = sourceBuffer->numberOfChannels();
@@ -345,8 +350,8 @@ void AudioBus::processWithGainFromMonoStereo(const AudioBus &sourceBus, float* l
     const float* sourceL = sourceBusSafe.channelByType(ChannelLeft)->data();
     const float* sourceR = numberOfSourceChannels > 1 ? sourceBusSafe.channelByType(ChannelRight)->data() : 0;
 
-    float* destinationL = channelByType(ChannelLeft)->data();
-    float* destinationR = numberOfDestinationChannels > 1 ? channelByType(ChannelRight)->data() : 0;
+    float* destinationL = channelByType(ChannelLeft)->mutableData();
+    float* destinationR = numberOfDestinationChannels > 1 ? channelByType(ChannelRight)->mutableData() : 0;
 
     const float DezipperRate = 0.005f;
     int framesToProcess = length();
@@ -437,7 +442,7 @@ void AudioBus::copyWithSampleAccurateGainValuesFrom(const AudioBus &sourceBus, f
     for (unsigned channelIndex = 0; channelIndex < numberOfChannels(); ++channelIndex) {
         if (sourceBus.numberOfChannels() == numberOfChannels())
             source = sourceBus.channel(channelIndex)->data();
-        float* destination = channel(channelIndex)->data();
+        float* destination = channel(channelIndex)->mutableData();
         vmul(source, 1, gainValues, 1, destination, 1, numberOfGainValues);
     }
 }
@@ -452,7 +457,7 @@ void AudioBus::sumWithGainFrom(const AudioBus &sourceBus, float* lastMixGain, fl
     processWithGainFrom(sourceBus, lastMixGain, targetGain, true);
 }
 
-PassOwnPtr<AudioBus> AudioBus::createBySampleRateConverting(AudioBus* sourceBus, bool mixToMono, double newSampleRate)
+PassOwnPtr<AudioBus> AudioBus::createBySampleRateConverting(const AudioBus* sourceBus, bool mixToMono, double newSampleRate)
 {
     // sourceBus's sample-rate must be known.
     ASSERT(sourceBus && sourceBus->sampleRate());
@@ -476,7 +481,7 @@ PassOwnPtr<AudioBus> AudioBus::createBySampleRateConverting(AudioBus* sourceBus,
     }
     
     // First, mix to mono (if necessary) then sample-rate convert.
-    AudioBus* resamplerSourceBus;
+    const AudioBus* resamplerSourceBus;
     OwnPtr<AudioBus> mixedMonoBus;
     if (mixToMono) {
         mixedMonoBus = AudioBus::createByMixingToMono(sourceBus);
@@ -497,8 +502,8 @@ PassOwnPtr<AudioBus> AudioBus::createBySampleRateConverting(AudioBus* sourceBus,
 
     // Sample-rate convert each channel.
     for (unsigned i = 0; i < numberOfDestinationChannels; ++i) {
-        float* source = resamplerSourceBus->channel(i)->data();
-        float* destination = destinationBus->channel(i)->data();
+        const float* source = resamplerSourceBus->channel(i)->data();
+        float* destination = destinationBus->channel(i)->mutableData();
 
         SincResampler resampler(sampleRateRatio);
         resampler.process(source, destination, sourceLength);
@@ -508,7 +513,7 @@ PassOwnPtr<AudioBus> AudioBus::createBySampleRateConverting(AudioBus* sourceBus,
     return destinationBus.release();
 }
 
-PassOwnPtr<AudioBus> AudioBus::createByMixingToMono(AudioBus* sourceBus)
+PassOwnPtr<AudioBus> AudioBus::createByMixingToMono(const AudioBus* sourceBus)
 {
     switch (sourceBus->numberOfChannels()) {
     case 1:
@@ -519,9 +524,9 @@ PassOwnPtr<AudioBus> AudioBus::createByMixingToMono(AudioBus* sourceBus)
             unsigned n = sourceBus->length();
             OwnPtr<AudioBus> destinationBus(adoptPtr(new AudioBus(1, n)));
 
-            float* sourceL = sourceBus->channel(0)->data();
-            float* sourceR = sourceBus->channel(1)->data();
-            float* destination = destinationBus->channel(0)->data();
+            const float* sourceL = sourceBus->channel(0)->data();
+            const float* sourceR = sourceBus->channel(1)->data();
+            float* destination = destinationBus->channel(0)->mutableData();
         
             // Do the mono mixdown.
             for (unsigned i = 0; i < n; ++i)
index 6370c8b..b3da0f4 100644 (file)
@@ -71,6 +71,7 @@ public:
     AudioChannel* channel(unsigned channel) { return m_channels[channel].get(); }
     const AudioChannel* channel(unsigned channel) const { return const_cast<AudioBus*>(this)->m_channels[channel].get(); }
     AudioChannel* channelByType(unsigned type);
+    const AudioChannel* channelByType(unsigned type) const;
 
     // Number of sample-frames
     size_t length() const { return m_length; }
@@ -87,24 +88,24 @@ public:
 
     // Creates a new buffer from a range in the source buffer.
     // 0 may be returned if the range does not fit in the sourceBuffer
-    static PassOwnPtr<AudioBus> createBufferFromRange(AudioBus* sourceBuffer, unsigned startFrame, unsigned endFrame);
+    static PassOwnPtr<AudioBus> createBufferFromRange(const AudioBus* sourceBuffer, unsigned startFrame, unsigned endFrame);
 
 
     // Creates a new AudioBus by sample-rate converting sourceBus to the newSampleRate.
     // setSampleRate() must have been previously called on sourceBus.
     // Note: sample-rate conversion is already handled in the file-reading code for the mac port, so we don't need this.
-    static PassOwnPtr<AudioBus> createBySampleRateConverting(AudioBus* sourceBus, bool mixToMono, double newSampleRate);
+    static PassOwnPtr<AudioBus> createBySampleRateConverting(const AudioBus* sourceBus, bool mixToMono, double newSampleRate);
 
     // Creates a new AudioBus by mixing all the channels down to mono.
     // If sourceBus is already mono, then the returned AudioBus will simply be a copy.
-    static PassOwnPtr<AudioBus> createByMixingToMono(AudioBus* sourceBus);
+    static PassOwnPtr<AudioBus> createByMixingToMono(const AudioBus* sourceBus);
 
     // Scales all samples by the same amount.
     void scale(float scale);
 
     // Master gain for this bus - used with sumWithGainFrom() below
     void setGain(float gain) { m_busGain = gain; }
-    float gain() { return m_busGain; }
+    float gain() const { return m_busGain; }
 
     void reset() { m_isFirstTime = true; } // for de-zippering
 
index ebbf3bb..3c748e6 100644 (file)
@@ -43,7 +43,7 @@ using namespace VectorMath;
 
 void AudioChannel::scale(float scale)
 {
-    vsmul(data(), 1, &scale, data(), 1, length());
+    vsmul(data(), 1, &scale, mutableData(), 1, length());
 }
 
 void AudioChannel::copyFrom(const AudioChannel* sourceChannel)
@@ -53,7 +53,7 @@ void AudioChannel::copyFrom(const AudioChannel* sourceChannel)
     if (!isSafe)
         return;
 
-    memcpy(data(), sourceChannel->data(), sizeof(float) * length());
+    memcpy(mutableData(), sourceChannel->data(), sizeof(float) * length());
 }
 
 void AudioChannel::copyFromRange(const AudioChannel* sourceChannel, unsigned startFrame, unsigned endFrame)
@@ -72,7 +72,7 @@ void AudioChannel::copyFromRange(const AudioChannel* sourceChannel, unsigned sta
         return;
 
     const float* source = sourceChannel->data();
-    float* destination = data();
+    float* destination = mutableData();
     memcpy(destination, source + startFrame, sizeof(float) * rangeLength);
 }
 
@@ -83,7 +83,7 @@ void AudioChannel::sumFrom(const AudioChannel* sourceChannel)
     if (!isSafe)
         return;
 
-    vadd(data(), 1, sourceChannel->data(), 1, data(), 1, length());
+    vadd(data(), 1, sourceChannel->data(), 1, mutableData(), 1, length());
 }
 
 float AudioChannel::maxAbsValue() const
index 8803e75..24de3f9 100644 (file)
@@ -73,7 +73,7 @@ public:
     size_t length() const { return m_length; }
 
     // Direct access to PCM sample data
-    float* data() { return m_rawPointer ? m_rawPointer : m_memBuffer->data(); }
+    float* mutableData() { return m_rawPointer ? m_rawPointer : m_memBuffer->data(); }
     const float* data() const { return m_rawPointer ? m_rawPointer : m_memBuffer->data(); }
 
     // Zeroes out all sample values in buffer.
index cf4d2d3..5f9139f 100644 (file)
@@ -71,7 +71,7 @@ void AudioDSPKernelProcessor::uninitialize()
     m_initialized = false;
 }
 
-void AudioDSPKernelProcessor::process(AudioBus* source, AudioBus* destination, size_t framesToProcess)
+void AudioDSPKernelProcessor::process(const AudioBus* source, AudioBus* destination, size_t framesToProcess)
 {
     ASSERT(source && destination);
     if (!source || !destination)
@@ -88,7 +88,7 @@ void AudioDSPKernelProcessor::process(AudioBus* source, AudioBus* destination, s
         return;
         
     for (unsigned i = 0; i < m_kernels.size(); ++i)
-        m_kernels[i]->process(source->channel(i)->data(), destination->channel(i)->data(), framesToProcess);
+        m_kernels[i]->process(source->channel(i)->data(), destination->channel(i)->mutableData(), framesToProcess);
 }
 
 // Resets filter state
index 40b5ab8..7f8f81d 100644 (file)
@@ -59,7 +59,7 @@ public:
     // AudioProcessor methods
     virtual void initialize();
     virtual void uninitialize();
-    virtual void process(AudioBus* source, AudioBus* destination, size_t framesToProcess);
+    virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess);
     virtual void reset();
     virtual void setNumberOfChannels(unsigned numberOfChannels);
 
index 2d7b60a..469f833 100644 (file)
@@ -54,7 +54,7 @@ public:
     virtual void uninitialize() = 0;
 
     // Processes the source to destination bus.  The number of channels must match in source and destination.
-    virtual void process(AudioBus* source, AudioBus* destination, size_t framesToProcess) = 0;
+    virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) = 0;
 
     // Resets filter state
     virtual void reset() = 0;
index ba5b58e..1a9f81e 100644 (file)
@@ -103,7 +103,7 @@ void AudioResampler::process(AudioSourceProvider* provider, AudioBus* destinatio
     // Now that we have the source data, resample each channel into the destination bus.
     // FIXME: optimize for the common stereo case where it's faster to process both left/right channels in the same inner loop.
     for (unsigned i = 0; i < numberOfChannels; ++i) {
-        float* destination = destinationBus->channel(i)->data();
+        float* destination = destinationBus->channel(i)->mutableData();
         m_kernels[i]->process(destination, framesToProcess);
     }
 }
index c4795f4..a552057 100644 (file)
@@ -115,10 +115,10 @@ void DynamicsCompressor::setEmphasisParameters(float gain, float anchorFreq, flo
     setEmphasisStageParameters(3, gain, anchorFreq / (filterStageRatio * filterStageRatio * filterStageRatio));
 }
 
-void DynamicsCompressor::process(AudioBus* sourceBus, AudioBus* destinationBus, unsigned framesToProcess)
+void DynamicsCompressor::process(const AudioBus* sourceBus, AudioBus* destinationBus, unsigned framesToProcess)
 {
-    float* sourceL = sourceBus->channel(0)->data();
-    float* sourceR;
+    const float* sourceL = sourceBus->channel(0)->data();
+    const float* sourceR;
 
     if (sourceBus->numberOfChannels() > 1)
         sourceR = sourceBus->channel(1)->data();
@@ -127,8 +127,8 @@ void DynamicsCompressor::process(AudioBus* sourceBus, AudioBus* destinationBus,
 
     ASSERT(destinationBus->numberOfChannels() == 2);
 
-    float* destinationL = destinationBus->channel(0)->data();
-    float* destinationR = destinationBus->channel(1)->data();
+    float* destinationL = destinationBus->channel(0)->mutableData();
+    float* destinationR = destinationBus->channel(1)->mutableData();
 
     float filterStageGain = parameterValue(ParamFilterStageGain);
     float filterStageRatio = parameterValue(ParamFilterStageRatio);
index 2152951..e0115ee 100644 (file)
@@ -64,7 +64,7 @@ public:
 
     DynamicsCompressor(bool isStereo, float sampleRate);
 
-    void process(AudioBus* sourceBus, AudioBus* destinationBus, unsigned framesToProcess);
+    void process(const AudioBus* sourceBus, AudioBus* destinationBus, unsigned framesToProcess);
     void reset();
 
     float parameterValue(unsigned parameterID);
index e9f496a..a7a4c12 100644 (file)
@@ -82,9 +82,9 @@ void DynamicsCompressorKernel::setPreDelayTime(float preDelayTime)
     }
 }
 
-void DynamicsCompressorKernel::process(float* sourceL,
+void DynamicsCompressorKernel::process(const float* sourceL,
                                        float* destinationL,
-                                       float* sourceR, /* stereo-linked */
+                                       const float* sourceR, /* stereo-linked */
                                        float* destinationR,
                                        unsigned framesToProcess,
 
index 8e5f709..cf319b3 100644 (file)
@@ -38,9 +38,9 @@ public:
     DynamicsCompressorKernel(float sampleRate);
 
     // Performs stereo-linked compression.
-    void process(float *sourceL,
+    void process(const float *sourceL,
                  float *destinationL,
-                 float *sourceR,
+                 const float *sourceR,
                  float *destinationR,
                  unsigned framesToProcess,
 
index 0da7622..a2d2dff 100644 (file)
@@ -49,7 +49,7 @@ EqualPowerPanner::EqualPowerPanner(float sampleRate)
     m_smoothingConstant = AudioUtilities::discreteTimeConstantForSampleRate(SmoothingTimeConstant, sampleRate);
 }
 
-void EqualPowerPanner::pan(double azimuth, double /*elevation*/, AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess)
+void EqualPowerPanner::pan(double azimuth, double /*elevation*/, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess)
 {
     // FIXME: implement stereo sources
     bool isInputSafe = inputBus && inputBus->numberOfChannels() == 1 && framesToProcess <= inputBus->length();
@@ -62,10 +62,10 @@ void EqualPowerPanner::pan(double azimuth, double /*elevation*/, AudioBus* input
     if (!isOutputSafe)
         return;
 
-    AudioChannel* channel = inputBus->channel(0);
-    float* sourceP = channel->data();                               
-    float* destinationL = outputBus->channelByType(AudioBus::ChannelLeft)->data();
-    float* destinationR = outputBus->channelByType(AudioBus::ChannelRight)->data();
+    const AudioChannel* channel = inputBus->channel(0);
+    const float* sourceP = channel->data();                               
+    float* destinationL = outputBus->channelByType(AudioBus::ChannelLeft)->mutableData();
+    float* destinationR = outputBus->channelByType(AudioBus::ChannelRight)->mutableData();
 
     if (!sourceP || !destinationL || !destinationR)
         return;
index 4f6001d..016cd4a 100644 (file)
@@ -35,7 +35,7 @@ class EqualPowerPanner : public Panner {
 public:
     EqualPowerPanner(float sampleRate);
 
-    virtual void pan(double azimuth, double elevation, AudioBus* inputBus, AudioBus* outputBuf, size_t framesToProcess);
+    virtual void pan(double azimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBuf, size_t framesToProcess);
 
     virtual void reset() { m_isFirstRender = true; }
 
index 9093433..2321de0 100644 (file)
@@ -47,7 +47,7 @@ FFTConvolver::FFTConvolver(size_t fftSize)
 {
 }
 
-void FFTConvolver::process(FFTFrame* fftKernel, float* sourceP, float* destP, size_t framesToProcess)
+void FFTConvolver::process(FFTFrame* fftKernel, const float* sourceP, float* destP, size_t framesToProcess)
 {
     // FIXME: make so framesToProcess is not required to fit evenly into fftSize/2
 
index c1b5002..375bf2c 100644 (file)
@@ -46,7 +46,7 @@ public:
     // The input to output latency is equal to fftSize / 2
     //
     // Processing in-place is allowed...
-    void process(FFTFrame* fftKernel, float* sourceP, float* destP, size_t framesToProcess);
+    void process(FFTFrame* fftKernel, const float* sourceP, float* destP, size_t framesToProcess);
 
     void reset();
 
index 4f032a2..a1a0a50 100644 (file)
@@ -43,7 +43,7 @@
 
 namespace WebCore {
 
-void FFTFrame::doPaddedFFT(float* data, size_t dataSize)
+void FFTFrame::doPaddedFFT(const float* data, size_t dataSize)
 {
     // Zero-pad the impulse response
     AudioFloatArray paddedResponse(fftSize()); // zero-initialized
index b25d279..042633c 100644 (file)
@@ -73,7 +73,7 @@ public:
 
     static void initialize();
     static void cleanup();
-    void doFFT(float* data);
+    void doFFT(const float* data);
     void doInverseFFT(float* data);
     void multiply(const FFTFrame& frame); // multiplies ourself with frame : effectively operator*=()
 
@@ -88,7 +88,7 @@ public:
     // Interpolates from frame1 -> frame2 as x goes from 0.0 -> 1.0
     static PassOwnPtr<FFTFrame> createInterpolatedFrame(const FFTFrame& frame1, const FFTFrame& frame2, double x);
 
-    void doPaddedFFT(float* data, size_t dataSize); // zero-padding with dataSize <= fftSize
+    void doPaddedFFT(const float* data, size_t dataSize); // zero-padding with dataSize <= fftSize
     double extractAverageGroupDelay();
     void addConstantGroupDelay(double sampleFrameDelay);
 
index ccff097..446e66d 100644 (file)
@@ -60,7 +60,7 @@ public:
     HRTFKernelList* kernelListR() { return m_kernelListR.get(); }
 
     double elevationAngle() const { return m_elevationAngle; }
-    unsigned numberOfAzimuths() { return NumberOfTotalAzimuths; }
+    unsigned numberOfAzimuths() const { return NumberOfTotalAzimuths; }
     float sampleRate() const { return m_sampleRate; }
     
     // Returns the left and right kernels for the given azimuth index.
index c44de93..391f904 100644 (file)
@@ -50,7 +50,7 @@ static float extractAverageGroupDelay(AudioChannel* channel, size_t analysisFFTS
 {
     ASSERT(channel);
         
-    float* impulseP = channel->data();
+    float* impulseP = channel->mutableData();
     
     bool isSizeGood = channel->length() >= analysisFFTSize;
     ASSERT(isSizeGood);
@@ -78,7 +78,7 @@ HRTFKernel::HRTFKernel(AudioChannel* channel, size_t fftSize, float sampleRate,
     // Determine the leading delay (average group delay) for the response.
     m_frameDelay = extractAverageGroupDelay(channel, fftSize / 2);
 
-    float* impulseResponse = channel->data();
+    float* impulseResponse = channel->mutableData();
     size_t responseLength = channel->length();
 
     if (bassBoost) {
@@ -114,7 +114,7 @@ PassOwnPtr<AudioChannel> HRTFKernel::createImpulseResponse()
 
     // Add leading delay back in.
     fftFrame.addConstantGroupDelay(m_frameDelay);
-    fftFrame.doInverseFFT(channel->data());
+    fftFrame.doInverseFFT(channel->mutableData());
 
     return channel.release();
 }
index 978371c..f09961f 100644 (file)
@@ -111,7 +111,7 @@ int HRTFPanner::calculateDesiredAzimuthIndexAndBlend(double azimuth, double& azi
     return desiredAzimuthIndex;
 }
 
-void HRTFPanner::pan(double desiredAzimuth, double elevation, AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess)
+void HRTFPanner::pan(double desiredAzimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess)
 {
     unsigned numInputChannels = inputBus ? inputBus->numberOfChannels() : 0;
 
@@ -147,14 +147,14 @@ void HRTFPanner::pan(double desiredAzimuth, double elevation, AudioBus* inputBus
 
     // Normally, we'll just be dealing with mono sources.
     // If we have a stereo input, implement stereo panning with left source processed by left HRTF, and right source by right HRTF.
-    AudioChannel* inputChannelL = inputBus->channelByType(AudioBus::ChannelLeft);
-    AudioChannel* inputChannelR = numInputChannels > 1 ? inputBus->channelByType(AudioBus::ChannelRight) : 0;
+    const AudioChannel* inputChannelL = inputBus->channelByType(AudioBus::ChannelLeft);
+    const AudioChannel* inputChannelR = numInputChannels > 1 ? inputBus->channelByType(AudioBus::ChannelRight) : 0;
 
     // Get source and destination pointers.
-    float* sourceL = inputChannelL->data();
-    float* sourceR = numInputChannels > 1 ? inputChannelR->data() : sourceL;
-    float* destinationL = outputBus->channelByType(AudioBus::ChannelLeft)->data();
-    float* destinationR = outputBus->channelByType(AudioBus::ChannelRight)->data();
+    const float* sourceL = inputChannelL->data();
+    const float* sourceR = numInputChannels > 1 ? inputChannelR->data() : sourceL;
+    float* destinationL = outputBus->channelByType(AudioBus::ChannelLeft)->mutableData();
+    float* destinationR = outputBus->channelByType(AudioBus::ChannelRight)->mutableData();
 
     double azimuthBlend;
     int desiredAzimuthIndex = calculateDesiredAzimuthIndexAndBlend(azimuth, azimuthBlend);
@@ -207,8 +207,8 @@ void HRTFPanner::pan(double desiredAzimuth, double elevation, AudioBus* inputBus
             
         // Calculate the source and destination pointers for the current segment.
         unsigned offset = segment * framesPerSegment;
-        float* segmentSourceL = sourceL + offset;
-        float* segmentSourceR = sourceR + offset;
+        const float* segmentSourceL = sourceL + offset;
+        const float* segmentSourceR = sourceR + offset;
         float* segmentDestinationL = destinationL + offset;
         float* segmentDestinationR = destinationR + offset;
 
index e771ba2..ad6f79a 100644 (file)
@@ -37,7 +37,7 @@ public:
     virtual ~HRTFPanner();
 
     // Panner
-    virtual void pan(double azimuth, double elevation, AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess);
+    virtual void pan(double azimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess);
     virtual void reset();
 
     size_t fftSize() { return fftSizeForSampleRate(m_sampleRate); }
index db51e90..f6c07e7 100644 (file)
@@ -77,7 +77,7 @@ public:
         // Copy the channel data from what we received from m_multiChannelProvider.
         ASSERT(m_currentChannel <= m_numberOfChannels);
         if (m_currentChannel < m_numberOfChannels) {
-            memcpy(bus->channel(0)->data(), m_multiChannelBus->channel(m_currentChannel)->data(), sizeof(float) * framesToProcess);
+            memcpy(bus->channel(0)->mutableData(), m_multiChannelBus->channel(m_currentChannel)->data(), sizeof(float) * framesToProcess);
             ++m_currentChannel;
         }
     }
@@ -113,7 +113,7 @@ void MultiChannelResampler::process(AudioSourceProvider* provider, AudioBus* des
         // However, if it calls provideInput() for the first channel, then it will call it for the remaining
         // channels, since they all buffer in the same way and are processing the same number of frames.
         m_kernels[channelIndex]->process(&channelProvider,
-                                         destination->channel(channelIndex)->data(),
+                                         destination->channel(channelIndex)->mutableData(),
                                          framesToProcess);
     }
 }
index 4b72832..d8b8dd0 100644 (file)
@@ -53,7 +53,7 @@ public:
 
     PanningModel panningModel() const { return m_panningModel; }
 
-    virtual void pan(double azimuth, double elevation, AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess) = 0;
+    virtual void pan(double azimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess) = 0;
 
     virtual void reset() = 0;
 
index 341626f..122e21b 100644 (file)
@@ -130,7 +130,7 @@ void Reverb::initialize(AudioBus* impulseResponseBuffer, size_t renderSliceSize,
         m_tempBuffer = adoptPtr(new AudioBus(2, MaxFrameSize));
 }
 
-void Reverb::process(AudioBus* sourceBus, AudioBus* destinationBus, size_t framesToProcess)
+void Reverb::process(const AudioBus* sourceBus, AudioBus* destinationBus, size_t framesToProcess)
 {
     // Do a fairly comprehensive sanity check.
     // If these conditions are satisfied, all of the source and destination pointers will be valid for the various matrixing cases.
@@ -148,7 +148,7 @@ void Reverb::process(AudioBus* sourceBus, AudioBus* destinationBus, size_t frame
     }
 
     AudioChannel* destinationChannelL = destinationBus->channel(0);
-    AudioChannel* sourceChannelL = sourceBus->channel(0);
+    const AudioChannel* sourceChannelL = sourceBus->channel(0);
 
     // Handle input -> output matrixing...
     size_t numInputChannels = sourceBus->numberOfChannels();
@@ -157,7 +157,7 @@ void Reverb::process(AudioBus* sourceBus, AudioBus* destinationBus, size_t frame
 
     if (numInputChannels == 2 && numReverbChannels == 2 && numOutputChannels == 2) {
         // 2 -> 2 -> 2
-        AudioChannel* sourceChannelR = sourceBus->channel(1);
+        const AudioChannel* sourceChannelR = sourceBus->channel(1);
         AudioChannel* destinationChannelR = destinationBus->channel(1);
         m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);
         m_convolvers[1]->process(sourceChannelR, destinationChannelR, framesToProcess);
@@ -177,13 +177,13 @@ void Reverb::process(AudioBus* sourceBus, AudioBus* destinationBus, size_t frame
         ASSERT(isCopySafe);
         if (!isCopySafe)
             return;
-        memcpy(destinationChannelR->data(), destinationChannelL->data(), sizeof(float) * framesToProcess);
+        memcpy(destinationChannelR->mutableData(), destinationChannelL->data(), sizeof(float) * framesToProcess);
     } else if (numInputChannels == 1 && numReverbChannels == 1 && numOutputChannels == 1) {
         // 1 -> 1 -> 1
         m_convolvers[0]->process(sourceChannelL, destinationChannelL, framesToProcess);
     } else if (numInputChannels == 2 && numReverbChannels == 4 && numOutputChannels == 2) {
         // 2 -> 4 -> 2 ("True" stereo)
-        AudioChannel* sourceChannelR = sourceBus->channel(1);
+        const AudioChannel* sourceChannelR = sourceBus->channel(1);
         AudioChannel* destinationChannelR = destinationBus->channel(1);
 
         AudioChannel* tempChannelL = m_tempBuffer->channel(0);
index f162e0b..779e7bb 100644 (file)
@@ -45,7 +45,7 @@ public:
     // renderSliceSize is a rendering hint, so the FFTs can be optimized to not all occur at the same time (very bad when rendering on a real-time thread).
     Reverb(AudioBus* impulseResponseBuffer, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads, bool normalize);
 
-    void process(AudioBus* sourceBus, AudioBus* destinationBus, size_t framesToProcess);
+    void process(const AudioBus* sourceBus, AudioBus* destinationBus, size_t framesToProcess);
     void reset();
 
     unsigned impulseResponseLength() const { return m_impulseResponseLength; }
index bbb5508..c611414 100644 (file)
@@ -82,7 +82,7 @@ ReverbConvolver::ReverbConvolver(AudioChannel* impulseResponse, size_t renderSli
     // Otherwise, assume we're being run from a command-line tool.
     bool hasRealtimeConstraint = useBackgroundThreads;
 
-    float* response = impulseResponse->data();
+    const float* response = impulseResponse->data();
     size_t totalResponseLength = impulseResponse->length();
 
     // Because we're not using direct-convolution in the leading portion, the reverb has an overall latency of half the first-stage FFT size
@@ -175,15 +175,15 @@ void ReverbConvolver::backgroundThreadEntry()
     }
 }
 
-void ReverbConvolver::process(AudioChannel* sourceChannel, AudioChannel* destinationChannel, size_t framesToProcess)
+void ReverbConvolver::process(const AudioChannel* sourceChannel, AudioChannel* destinationChannel, size_t framesToProcess)
 {
     bool isSafe = sourceChannel && destinationChannel && sourceChannel->length() >= framesToProcess && destinationChannel->length() >= framesToProcess;
     ASSERT(isSafe);
     if (!isSafe)
         return;
         
-    float* source = sourceChannel->data();
-    float* destination = destinationChannel->data();
+    const float* source = sourceChannel->data();
+    float* destination = destinationChannel->mutableData();
     bool isDataSafe = source && destination;
     ASSERT(isDataSafe);
     if (!isDataSafe)
index 013b684..370b872 100644 (file)
@@ -52,7 +52,7 @@ public:
     ReverbConvolver(AudioChannel* impulseResponse, size_t renderSliceSize, size_t maxFFTSize, size_t convolverRenderPhase, bool useBackgroundThreads);
     ~ReverbConvolver();
 
-    void process(AudioChannel* sourceChannel, AudioChannel* destinationChannel, size_t framesToProcess);
+    void process(const AudioChannel* sourceChannel, AudioChannel* destinationChannel, size_t framesToProcess);
     void reset();
 
     size_t impulseResponseLength() const { return m_impulseResponseLength; }
index f207d19..53bb650 100644 (file)
@@ -43,7 +43,7 @@ namespace WebCore {
 
 using namespace VectorMath;
 
-ReverbConvolverStage::ReverbConvolverStage(float* impulseResponse, size_t responseLength, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength,
+ReverbConvolverStage::ReverbConvolverStage(const float* impulseResponse, size_t responseLength, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength,
                                            size_t fftSize, size_t renderPhase, size_t renderSliceSize, ReverbAccumulationBuffer* accumulationBuffer)
     : m_fftKernel(fftSize)
     , m_accumulationBuffer(accumulationBuffer)
@@ -88,7 +88,7 @@ void ReverbConvolverStage::processInBackground(ReverbConvolver* convolver, size_
     process(source, framesToProcess);
 }
 
-void ReverbConvolverStage::process(float* source, size_t framesToProcess)
+void ReverbConvolverStage::process(const float* source, size_t framesToProcess)
 {
     ASSERT(source);
     if (!source)
@@ -96,7 +96,8 @@ void ReverbConvolverStage::process(float* source, size_t framesToProcess)
     
     // Deal with pre-delay stream : note special handling of zero delay.
 
-    float* preDelayedSource;
+    const float* preDelayedSource;
+    float* preDelayedDestination;
     float* temporaryBuffer;
     bool isTemporaryBufferSafe = false;
     if (m_preDelayLength > 0) {
@@ -108,10 +109,12 @@ void ReverbConvolverStage::process(float* source, size_t framesToProcess)
 
         isTemporaryBufferSafe = framesToProcess <= m_temporaryBuffer.size();
 
-        preDelayedSource = m_preDelayBuffer.data() + m_preReadWriteIndex;
+        preDelayedDestination = m_preDelayBuffer.data() + m_preReadWriteIndex;
+        preDelayedSource = preDelayedDestination;
         temporaryBuffer = m_temporaryBuffer.data();        
     } else {
         // Zero delay
+        preDelayedDestination = 0;
         preDelayedSource = source;
         temporaryBuffer = m_preDelayBuffer.data();
         
@@ -138,7 +141,7 @@ void ReverbConvolverStage::process(float* source, size_t framesToProcess)
 
     // Finally copy input to pre-delay.
     if (m_preDelayLength > 0) {
-        memcpy(preDelayedSource, source, sizeof(float) * framesToProcess);
+        memcpy(preDelayedDestination, source, sizeof(float) * framesToProcess);
         m_preReadWriteIndex += framesToProcess;
 
         ASSERT(m_preReadWriteIndex <= m_preDelayLength);
index fc05a0e..9811bc6 100644 (file)
@@ -45,11 +45,11 @@ class ReverbConvolverStage {
 public:
     // renderPhase is useful to know so that we can manipulate the pre versus post delay so that stages will perform
     // their heavy work (FFT processing) on different slices to balance the load in a real-time thread.
-    ReverbConvolverStage(float* impulseResponse, size_t responseLength, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength,
+    ReverbConvolverStage(const float* impulseResponse, size_t responseLength, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength,
                          size_t fftSize, size_t renderPhase, size_t renderSliceSize, ReverbAccumulationBuffer* accumulationBuffer);
 
     // WARNING: framesToProcess must be such that it evenly divides the delay buffer size (stage_offset).
-    void process(float* source, size_t framesToProcess);
+    void process(const float* source, size_t framesToProcess);
 
     void processInBackground(ReverbConvolver* convolver, size_t framesToProcess);
 
index f270f6f..1be9af8 100644 (file)
@@ -40,7 +40,7 @@ ReverbInputBuffer::ReverbInputBuffer(size_t length)
 {
 }
 
-void ReverbInputBuffer::write(float* sourceP, size_t numberOfFrames)
+void ReverbInputBuffer::write(const float* sourceP, size_t numberOfFrames)
 {
     size_t bufferLength = m_buffer.size();
     bool isCopySafe = m_writeIndex + numberOfFrames <= bufferLength;
index 15a2818..5036575 100644 (file)
@@ -41,7 +41,7 @@ public:
     // The realtime audio thread keeps writing samples here.
     // The assumption is that the buffer's length is evenly divisible by numberOfFrames (for nearly all cases this will be fine).
     // FIXME: remove numberOfFrames restriction...
-    void write(float* sourceP, size_t numberOfFrames);
+    void write(const float* sourceP, size_t numberOfFrames);
 
     // Background threads can call this to check if there's anything to read...
     size_t writeIndex() const { return m_writeIndex; }
index 0e4b849..1ee692c 100644 (file)
@@ -135,6 +135,8 @@ void SincResampler::consumeSource(float* buffer, unsigned numberOfSourceFrames)
     
     // Wrap the provided buffer by an AudioBus for use by the source provider.
     AudioBus bus(1, numberOfSourceFrames, false);
+
+    // FIXME: Find a way to make the following const-correct:
     bus.setChannelMemory(0, buffer, numberOfSourceFrames);
     
     m_sourceProvider->provideInput(&bus, numberOfSourceFrames);
@@ -146,7 +148,7 @@ namespace {
 
 class BufferSourceProvider : public AudioSourceProvider {
 public:
-    BufferSourceProvider(float* source, size_t numberOfSourceFrames)
+    BufferSourceProvider(const float* source, size_t numberOfSourceFrames)
         : m_source(source)
         , m_sourceFramesAvailable(numberOfSourceFrames)
     {
@@ -159,7 +161,7 @@ public:
         if (!m_source || !bus)
             return;
             
-        float* buffer = bus->channel(0)->data();
+        float* buffer = bus->channel(0)->mutableData();
 
         // Clamp to number of frames available and zero-pad.
         size_t framesToCopy = min(m_sourceFramesAvailable, framesToProcess);
@@ -174,13 +176,13 @@ public:
     }
     
 private:
-    float* m_source;
+    const float* m_source;
     size_t m_sourceFramesAvailable;
 };
 
 } // namespace
 
-void SincResampler::process(float* source, float* destination, unsigned numberOfSourceFrames)
+void SincResampler::process(const float* source, float* destination, unsigned numberOfSourceFrames)
 {
     // Resample an in-memory buffer using an AudioSourceProvider.
     BufferSourceProvider sourceProvider(source, numberOfSourceFrames);
index bbe0c55..04dbf3f 100644 (file)
@@ -44,7 +44,7 @@ public:
     SincResampler(double scaleFactor, unsigned kernelSize = 32, unsigned numberOfKernelOffsets = 32);
     
     // Processes numberOfSourceFrames from source to produce numberOfSourceFrames / scaleFactor frames in destination.
-    void process(float* source, float* destination, unsigned numberOfSourceFrames);
+    void process(const float* source, float* destination, unsigned numberOfSourceFrames);
 
     // Process with input source callback function for streaming applications.
     void process(AudioSourceProvider*, float* destination, size_t framesToProcess);
@@ -71,7 +71,7 @@ protected:
     // Source is copied into this buffer for each processing pass.
     AudioFloatArray m_inputBuffer;
 
-    float* m_source;
+    const float* m_source;
     unsigned m_sourceFramesAvailable;
     
     // m_sourceProvider is used to provide the audio input stream to the resampler.
index 2fa4400..9e6f1b6 100644 (file)
@@ -36,7 +36,7 @@
 
 namespace WebCore {
 
-void ZeroPole::process(float *source, float *destination, unsigned framesToProcess)
+void ZeroPole::process(const float *source, float *destination, unsigned framesToProcess)
 {
     float zero = m_zero;
     float pole = m_pole;
index 93fd0d6..4cb1d17 100644 (file)
@@ -43,7 +43,7 @@ public:
     {
     }
 
-    void process(float *source, float *destination, unsigned framesToProcess);
+    void process(const float *source, float *destination, unsigned framesToProcess);
 
     // Reset filter state.
     void reset() { m_lastX = 0; m_lastY = 0; }
index 9f89324..d4624a7 100644 (file)
@@ -135,7 +135,7 @@ void FFTFrame::multiply(const FFTFrame& frame)
     VectorMath::vsmul(imagP1, 1, &scale, imagP1, 1, halfSize);
 }
 
-void FFTFrame::doFFT(float* data)
+void FFTFrame::doFFT(const float* data)
 {
     // Copy since processing is in-place.
     float* p = m_complexData.data();
index 9550263..d0ecf1a 100644 (file)
@@ -212,7 +212,7 @@ PassOwnPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono
         for (size_t i = 0; i < numberOfChannels; ++i) {
             bufferList->mBuffers[i].mNumberChannels = 1;
             bufferList->mBuffers[i].mDataByteSize = numberOfFrames * sizeof(float);
-            bufferList->mBuffers[i].mData = audioBus->channel(i)->data();
+            bufferList->mBuffers[i].mData = audioBus->channel(i)->mutableData();
         }
     }
 
@@ -224,7 +224,7 @@ PassOwnPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono
 
     if (mixToMono && numberOfChannels == 2) {
         // Mix stereo down to mono
-        float* destL = audioBus->channel(0)->data();
+        float* destL = audioBus->channel(0)->mutableData();
         for (size_t i = 0; i < numberOfFrames; i++)
             destL[i] = 0.5f * (bufferL[i] + bufferR[i]);
     }
index 6dfbbec..8cef928 100644 (file)
@@ -127,7 +127,7 @@ void FFTFrame::multiply(const FFTFrame& frame)
     VectorMath::vsmul(imagP1, 1, &scale, imagP1, 1, halfSize);
 }
 
-void FFTFrame::doFFT(float* data)
+void FFTFrame::doFFT(const float* data)
 {
     vDSP_ctoz((DSPComplex*)data, 2, &m_frame, 1, m_FFTSize / 2);
     vDSP_fft_zrip(m_FFTSetup, &m_frame, 1, m_log2FFTSize, FFT_FORWARD);
index 6bf2c1e..0f3a282 100644 (file)
@@ -170,7 +170,7 @@ void FFTFrame::multiply(const FFTFrame& frame)
     }
 }
 
-void FFTFrame::doFFT(float* data)
+void FFTFrame::doFFT(const float* data)
 {
     // Compute Forward transform.
     MKL_LONG status = DftiComputeForward(m_handle, data, m_complexData.data());
index 1fd2f38..a0cb13a 100644 (file)
@@ -142,7 +142,7 @@ void AudioBufferSourceNode::process(size_t framesToProcess)
             
             if (isSafe) {
                 for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
-                    memset(outputBus->channel(i)->data() + zeroStartFrame, 0, sizeof(float) * framesToZero);
+                    memset(outputBus->channel(i)->mutableData() + zeroStartFrame, 0, sizeof(float) * framesToZero);
             }
 
             m_isPlaying = false;
@@ -199,11 +199,11 @@ void AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
         return;
 
     // Get the destination pointers.
-    float* destinationL = bus->channel(0)->data();
+    float* destinationL = bus->channel(0)->mutableData();
     ASSERT(destinationL);
     if (!destinationL)
         return;
-    float* destinationR = (numberOfChannels < 2) ? 0 : bus->channel(1)->data();
+    float* destinationR = (numberOfChannels < 2) ? 0 : bus->channel(1)->mutableData();
     
     bool isStereo = destinationR;
     
index 12243c0..e1aec75 100644 (file)
@@ -114,7 +114,7 @@ void BiquadProcessor::checkForDirtyCoefficients()
     }
 }
 
-void BiquadProcessor::process(AudioBus* source, AudioBus* destination, size_t framesToProcess)
+void BiquadProcessor::process(const AudioBus* source, AudioBus* destination, size_t framesToProcess)
 {
     if (!isInitialized()) {
         destination->zero();
@@ -125,7 +125,7 @@ void BiquadProcessor::process(AudioBus* source, AudioBus* destination, size_t fr
             
     // For each channel of our input, process using the corresponding BiquadDSPKernel into the output channel.
     for (unsigned i = 0; i < m_kernels.size(); ++i)
-        m_kernels[i]->process(source->channel(i)->data(), destination->channel(i)->data(), framesToProcess);
+        m_kernels[i]->process(source->channel(i)->data(), destination->channel(i)->mutableData(), framesToProcess);
 }
 
 void BiquadProcessor::setType(FilterType type)
index c7f7d30..91a1ccc 100644 (file)
@@ -58,7 +58,7 @@ public:
     
     virtual PassOwnPtr<AudioDSPKernel> createKernel();
         
-    virtual void process(AudioBus* source, AudioBus* destination, size_t framesToProcess);
+    virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess);
 
     // Get the magnitude and phase response of the filter at the given
     // set of frequencies (in Hz). The phase response is in radians.
index 4221509..bf83147 100644 (file)
@@ -161,10 +161,10 @@ void JavaScriptAudioNode::process(size_t framesToProcess)
     if (!channelsAreGood)
         return;
 
-    float* sourceL = inputBus->channel(0)->data();
-    float* sourceR = numberOfInputChannels > 1 ? inputBus->channel(1)->data() : 0;
-    float* destinationL = outputBus->channel(0)->data();
-    float* destinationR = outputBus->channel(1)->data();
+    const float* sourceL = inputBus->channel(0)->data();
+    const float* sourceR = numberOfInputChannels > 1 ? inputBus->channel(1)->data() : 0;
+    float* destinationL = outputBus->channel(0)->mutableData();
+    float* destinationR = outputBus->channel(1)->mutableData();
 
     // Copy from the input to the input buffer.  See "buffersAreGood" check above for safety.
     size_t bytesToCopy = sizeof(float) * framesToProcess;
index ae1835b..198a502 100644 (file)
@@ -140,7 +140,7 @@ void OfflineAudioDestinationNode::render()
         size_t framesAvailableToCopy = min(framesToProcess, renderQuantumSize);
         
         for (unsigned channelIndex = 0; channelIndex < numberOfChannels; ++channelIndex) {
-            float* source = m_renderBus->channel(channelIndex)->data();
+            const float* source = m_renderBus->channel(channelIndex)->data();
             float* destination = m_renderTarget->getChannelData(channelIndex)->data();
             memcpy(destination + n, source, sizeof(float) * framesAvailableToCopy);
         }
index ccd8655..a3f807d 100644 (file)
@@ -113,7 +113,7 @@ void RealtimeAnalyser::writeInput(AudioBus* bus, size_t framesToProcess)
     
     // Perform real-time analysis
     // FIXME : for now just use left channel (must mix if stereo source)
-    float* source = bus->channel(0)->data();
+    const float* source = bus->channel(0)->data();
 
     // The source has already been sanity checked with isBusGood above.
     
index f7571de..4f83e16 100644 (file)
@@ -56,7 +56,7 @@ void WaveShaperProcessor::setCurve(Float32Array* curve)
     m_curve = curve;
 }
 
-void WaveShaperProcessor::process(AudioBus* source, AudioBus* destination, size_t framesToProcess)
+void WaveShaperProcessor::process(const AudioBus* source, AudioBus* destination, size_t framesToProcess)
 {
     if (!isInitialized()) {
         destination->zero();
@@ -68,7 +68,7 @@ void WaveShaperProcessor::process(AudioBus* source, AudioBus* destination, size_
     if (m_processLock.tryLock()) {        
         // For each channel of our input, process using the corresponding WaveShaperDSPKernel into the output channel.
         for (unsigned i = 0; i < m_kernels.size(); ++i)
-            m_kernels[i]->process(source->channel(i)->data(), destination->channel(i)->data(), framesToProcess);
+            m_kernels[i]->process(source->channel(i)->data(), destination->channel(i)->mutableData(), framesToProcess);
 
         m_processLock.unlock();
     } else {
index 4016e8b..4735efd 100644 (file)
@@ -44,7 +44,7 @@ public:
 
     virtual PassOwnPtr<AudioDSPKernel> createKernel();
 
-    virtual void process(AudioBus* source, AudioBus* destination, size_t framesToProcess);
+    virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess);
 
     void setCurve(Float32Array*);
     Float32Array* curve() { return m_curve.get(); }
index 31e9fbc..a5c7099 100644 (file)
@@ -1,3 +1,19 @@
+2012-01-18  Jer Noble  <jer.noble@apple.com>
+
+        Make WebAudio API const-correct.
+        https://bugs.webkit.org/show_bug.cgi?id=76573
+
+        Reviewed by Daniel Bates.
+
+        The following functions were modified to use the renamed mutableData() accessor:
+        * src/AudioDestinationChromium.cpp:
+        (WebCore::AudioDestinationChromium::FIFO::fillBuffer):
+        (WebCore::AudioDestinationChromium::FIFO::consume):
+        * src/WebAudioData.cpp:
+        (WebCore::WebAudioBus::channelData):
+        * src/WebMediaPlayerClientImpl.cpp:
+        (WebKit::WebMediaPlayerClientImpl::AudioSourceProviderImpl::provideInput):
+
 2012-01-19  Joi Sigurdsson  <joi@chromium.org>
 
         Enable use of precompiled headers in Chromium port on Windows.
index e6ef7d1..6c82385 100644 (file)
@@ -160,8 +160,8 @@ void AudioDestinationChromium::FIFO::consume(AudioBus* destination, size_t frame
     size_t numberOfChannels = m_fifoAudioBus.numberOfChannels();
 
     for (size_t channelIndex = 0; channelIndex < numberOfChannels; ++channelIndex) {
-        float* destinationData = destination->channel(channelIndex)->data();
-        float* sourceData = m_fifoAudioBus.channel(channelIndex)->data();
+        float* destinationData = destination->channel(channelIndex)->mutableData();
+        const float* sourceData = m_fifoAudioBus.channel(channelIndex)->data();
 
         bool isCopyGood = ((m_readIndex < m_fifoLength)
                            && (m_readIndex + part1Length) <= m_fifoLength
@@ -219,8 +219,8 @@ void AudioDestinationChromium::FIFO::fillBuffer(size_t numberOfFrames)
         size_t numberOfChannels = m_fifoAudioBus.numberOfChannels();
         
         for (size_t channelIndex = 0; channelIndex < numberOfChannels; ++channelIndex) {
-            float* destination = m_fifoAudioBus.channel(channelIndex)->data();
-            float* source = m_tempBus.channel(channelIndex)->data();
+            float* destination = m_fifoAudioBus.channel(channelIndex)->mutableData();
+            const float* source = m_tempBus.channel(channelIndex)->data();
 
             bool isCopyGood = (part1Length <= m_providerSize
                                && (part1Length + part2Length) <= m_providerSize
index 50e3814..cc447ba 100644 (file)
@@ -110,7 +110,7 @@ float* WebAudioBus::channelData(unsigned channelIndex)
     if (!m_private)
         return 0;
     ASSERT(channelIndex < numberOfChannels());
-    return m_private->channel(channelIndex)->data();
+    return m_private->channel(channelIndex)->mutableData();
 #else
     ASSERT_NOT_REACHED();
     return 0;
index 891c31f..d2d5525 100644 (file)
@@ -726,7 +726,7 @@ void WebMediaPlayerClientImpl::AudioSourceProviderImpl::provideInput(AudioBus* b
     size_t n = bus->numberOfChannels();
     WebVector<float*> webAudioData(n);
     for (size_t i = 0; i < n; ++i)
-        webAudioData[i] = bus->channel(i)->data();
+        webAudioData[i] = bus->channel(i)->mutableData();
 
     m_webAudioSourceProvider->provideInput(webAudioData, framesToProcess);
 }