WEB_AUDIO does not compile on Leopard 32-bit.
author: jer.noble@apple.com <jer.noble@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 5 Oct 2011 20:53:43 +0000 (20:53 +0000)
committer: jer.noble@apple.com <jer.noble@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 5 Oct 2011 20:53:43 +0000 (20:53 +0000)
https://bugs.webkit.org/show_bug.cgi?id=69292

Reviewed by Simon Fraser.

Source/WebCore:

No new tests; covered by all existing audio tests.

Use of float and double within the WEB_AUDIO implementation has been harmonized, with most
calculations done using floats, with narrowPrecisionToFloat() added when necessary to
narrow double results down to floats, and with float constants initialized with float values:
* platform/audio/AudioBus.cpp:
(WebCore::AudioBus::AudioBus):
(WebCore::AudioBus::createByMixingToMono):
* platform/audio/AudioBus.h:
(WebCore::AudioBus::sampleRate):
(WebCore::AudioBus::setSampleRate):
* platform/audio/AudioDSPKernel.h:
(WebCore::AudioDSPKernel::AudioDSPKernel):
(WebCore::AudioDSPKernel::sampleRate):
* platform/audio/AudioDSPKernelProcessor.cpp:
(WebCore::AudioDSPKernelProcessor::AudioDSPKernelProcessor):
* platform/audio/AudioDSPKernelProcessor.h:
* platform/audio/AudioDestination.h:
* platform/audio/AudioFileReader.h:
* platform/audio/AudioProcessor.h:
(WebCore::AudioProcessor::AudioProcessor):
(WebCore::AudioProcessor::sampleRate):
* platform/audio/AudioUtilities.cpp:
(WebCore::AudioUtilities::decibelsToLinear):
(WebCore::AudioUtilities::linearToDecibels):
(WebCore::AudioUtilities::discreteTimeConstantForSampleRate):
* platform/audio/AudioUtilities.h:
* platform/audio/DynamicsCompressor.cpp:
(WebCore::DynamicsCompressor::DynamicsCompressor):
(WebCore::DynamicsCompressor::initializeParameters):
(WebCore::DynamicsCompressor::parameterValue):
(WebCore::DynamicsCompressor::setEmphasisStageParameters):
(WebCore::DynamicsCompressor::process):
* platform/audio/DynamicsCompressor.h:
(WebCore::DynamicsCompressor::sampleRate):
(WebCore::DynamicsCompressor::nyquist):
* platform/audio/DynamicsCompressorKernel.cpp:
(WebCore::saturate):
(WebCore::DynamicsCompressorKernel::DynamicsCompressorKernel):
(WebCore::DynamicsCompressorKernel::process):
* platform/audio/DynamicsCompressorKernel.h:
* platform/audio/EqualPowerPanner.cpp:
(WebCore::EqualPowerPanner::EqualPowerPanner):
* platform/audio/EqualPowerPanner.h:
* platform/audio/HRTFDatabase.cpp:
(WebCore::HRTFDatabase::create):
(WebCore::HRTFDatabase::HRTFDatabase):
* platform/audio/HRTFDatabase.h:
(WebCore::HRTFDatabase::sampleRate):
* platform/audio/HRTFDatabaseLoader.cpp:
(WebCore::HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary):
(WebCore::HRTFDatabaseLoader::HRTFDatabaseLoader):
* platform/audio/HRTFDatabaseLoader.h:
(WebCore::HRTFDatabaseLoader::databaseSampleRate):
* platform/audio/HRTFElevation.cpp:
(WebCore::HRTFElevation::calculateSymmetricKernelsForAzimuthElevation):
(WebCore::HRTFElevation::calculateKernelsForAzimuthElevation):
(WebCore::HRTFElevation::createForSubject):
(WebCore::HRTFElevation::createByInterpolatingSlices):
* platform/audio/HRTFElevation.h:
(WebCore::HRTFElevation::sampleRate):
(WebCore::HRTFElevation::HRTFElevation):
* platform/audio/HRTFKernel.cpp:
(WebCore::extractAverageGroupDelay):
(WebCore::HRTFKernel::HRTFKernel):
(WebCore::HRTFKernel::createInterpolatedKernel):
* platform/audio/HRTFKernel.h:
(WebCore::HRTFKernel::create):
(WebCore::HRTFKernel::frameDelay):
(WebCore::HRTFKernel::sampleRate):
(WebCore::HRTFKernel::HRTFKernel):
* platform/audio/HRTFPanner.cpp:
(WebCore::HRTFPanner::HRTFPanner):
(WebCore::HRTFPanner::fftSizeForSampleRate):
* platform/audio/HRTFPanner.h:
(WebCore::HRTFPanner::sampleRate):
* platform/audio/Panner.cpp:
(WebCore::Panner::create):
* platform/audio/Panner.h:
* platform/audio/chromium/AudioBusChromium.cpp:
(WebCore::AudioBus::loadPlatformResource):
* platform/audio/mac/AudioBusMac.mm:
(WebCore::AudioBus::loadPlatformResource):
* platform/audio/mac/AudioDestinationMac.cpp:
(WebCore::AudioDestination::create):
(WebCore::AudioDestination::hardwareSampleRate):
(WebCore::AudioDestinationMac::AudioDestinationMac):
* platform/audio/mac/AudioDestinationMac.h:
(WebCore::AudioDestinationMac::sampleRate):
* platform/audio/mac/AudioFileReaderMac.cpp:
(WebCore::AudioFileReader::createBus):
(WebCore::createBusFromAudioFile):
(WebCore::createBusFromInMemoryAudioFile):
* platform/audio/mac/AudioFileReaderMac.h:
* webaudio/AsyncAudioDecoder.cpp:
(WebCore::AsyncAudioDecoder::decodeAsync):
(WebCore::AsyncAudioDecoder::DecodingTask::create):
(WebCore::AsyncAudioDecoder::DecodingTask::DecodingTask):
* webaudio/AsyncAudioDecoder.h:
(WebCore::AsyncAudioDecoder::DecodingTask::sampleRate):
* webaudio/AudioBasicProcessorNode.cpp:
(WebCore::AudioBasicProcessorNode::AudioBasicProcessorNode):
* webaudio/AudioBasicProcessorNode.h:
* webaudio/AudioBuffer.cpp:
(WebCore::AudioBuffer::create):
(WebCore::AudioBuffer::createFromAudioFileData):
(WebCore::AudioBuffer::AudioBuffer):
* webaudio/AudioBuffer.h:
(WebCore::AudioBuffer::sampleRate):
* webaudio/AudioBufferSourceNode.cpp:
(WebCore::AudioBufferSourceNode::create):
(WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
(WebCore::AudioBufferSourceNode::process):
(WebCore::AudioBufferSourceNode::renderFromBuffer):
* webaudio/AudioBufferSourceNode.h:
* webaudio/AudioChannelMerger.cpp:
(WebCore::AudioChannelMerger::AudioChannelMerger):
* webaudio/AudioChannelMerger.h:
(WebCore::AudioChannelMerger::create):
* webaudio/AudioChannelSplitter.cpp:
(WebCore::AudioChannelSplitter::AudioChannelSplitter):
* webaudio/AudioChannelSplitter.h:
(WebCore::AudioChannelSplitter::create):
* webaudio/AudioContext.cpp:
(WebCore::AudioContext::createOfflineContext):
(WebCore::AudioContext::AudioContext):
(WebCore::AudioContext::createBuffer):
* webaudio/AudioContext.h:
(WebCore::AudioContext::sampleRate):
* webaudio/AudioDestinationNode.cpp:
(WebCore::AudioDestinationNode::AudioDestinationNode):
* webaudio/AudioDestinationNode.h:
* webaudio/AudioGainNode.cpp:
(WebCore::AudioGainNode::AudioGainNode):
* webaudio/AudioGainNode.h:
(WebCore::AudioGainNode::create):
* webaudio/AudioListener.cpp:
(WebCore::AudioListener::AudioListener):
* webaudio/AudioListener.h:
(WebCore::AudioListener::setPosition):
(WebCore::AudioListener::setOrientation):
(WebCore::AudioListener::setVelocity):
* webaudio/AudioNode.cpp:
(WebCore::AudioNode::AudioNode):
* webaudio/AudioNode.h:
(WebCore::AudioNode::sampleRate):
* webaudio/AudioPannerNode.cpp:
(WebCore::AudioPannerNode::AudioPannerNode):
(WebCore::AudioPannerNode::getAzimuthElevation):
* webaudio/AudioPannerNode.h:
(WebCore::AudioPannerNode::create):
* webaudio/AudioParam.cpp:
(WebCore::AudioParam::value):
(WebCore::AudioParam::smoothedValue):
(WebCore::AudioParam::smooth):
(WebCore::AudioParam::calculateSampleAccurateValues):
* webaudio/AudioParamTimeline.cpp:
(WebCore::AudioParamTimeline::valueForContextTime):
(WebCore::timeToSampleFrame):
(WebCore::AudioParamTimeline::valuesForTimeRangeImpl):
* webaudio/AudioSourceNode.h:
(WebCore::AudioSourceNode::AudioSourceNode):
* webaudio/BiquadFilterNode.cpp:
(WebCore::BiquadFilterNode::BiquadFilterNode):
* webaudio/BiquadFilterNode.h:
(WebCore::BiquadFilterNode::create):
* webaudio/BiquadProcessor.cpp:
(WebCore::BiquadProcessor::BiquadProcessor):
* webaudio/BiquadProcessor.h:
* webaudio/ConvolverNode.cpp:
(WebCore::ConvolverNode::ConvolverNode):
* webaudio/ConvolverNode.h:
(WebCore::ConvolverNode::create):
* webaudio/DefaultAudioDestinationNode.cpp:
(WebCore::DefaultAudioDestinationNode::initialize):
* webaudio/DefaultAudioDestinationNode.h:
(WebCore::DefaultAudioDestinationNode::sampleRate):
* webaudio/DelayDSPKernel.cpp:
(WebCore::DelayDSPKernel::DelayDSPKernel):
(WebCore::DelayDSPKernel::process):
* webaudio/DelayDSPKernel.h:
* webaudio/DelayNode.cpp:
(WebCore::DelayNode::DelayNode):
* webaudio/DelayNode.h:
(WebCore::DelayNode::create):
* webaudio/DelayProcessor.cpp:
(WebCore::DelayProcessor::DelayProcessor):
* webaudio/DelayProcessor.h:
* webaudio/DynamicsCompressorNode.cpp:
(WebCore::DynamicsCompressorNode::DynamicsCompressorNode):
* webaudio/DynamicsCompressorNode.h:
(WebCore::DynamicsCompressorNode::create):
* webaudio/HighPass2FilterNode.cpp:
(WebCore::HighPass2FilterNode::HighPass2FilterNode):
* webaudio/HighPass2FilterNode.h:
(WebCore::HighPass2FilterNode::create):
* webaudio/JavaScriptAudioNode.cpp:
(WebCore::JavaScriptAudioNode::create):
(WebCore::JavaScriptAudioNode::JavaScriptAudioNode):
(WebCore::JavaScriptAudioNode::initialize):
* webaudio/JavaScriptAudioNode.h:
* webaudio/LowPass2FilterNode.cpp:
(WebCore::LowPass2FilterNode::LowPass2FilterNode):
* webaudio/LowPass2FilterNode.h:
(WebCore::LowPass2FilterNode::create):
* webaudio/OfflineAudioDestinationNode.h:
(WebCore::OfflineAudioDestinationNode::sampleRate):
* webaudio/RealtimeAnalyserNode.cpp:
(WebCore::RealtimeAnalyserNode::RealtimeAnalyserNode):
* webaudio/RealtimeAnalyserNode.h:
(WebCore::RealtimeAnalyserNode::create):
* webaudio/WaveShaperDSPKernel.cpp:
(WebCore::WaveShaperDSPKernel::process):
* webaudio/WaveShaperProcessor.cpp:
(WebCore::WaveShaperProcessor::WaveShaperProcessor):
* webaudio/WaveShaperProcessor.h:

Source/WebKit/chromium:

Platform-independent portions of WEB_AUDIO have changed from double -> float, and
platform-specific subclasses must change as well.

* src/AudioDestinationChromium.cpp:
(WebCore::AudioDestination::create):
(WebCore::AudioDestinationChromium::AudioDestinationChromium):
(WebCore::AudioDestination::hardwareSampleRate):
* src/AudioDestinationChromium.h:
(WebCore::AudioDestinationChromium::sampleRate):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@96745 268f45cc-cd09-0410-ab3c-d52691b4dbfc

93 files changed:
Source/WebCore/ChangeLog
Source/WebCore/platform/audio/AudioBus.cpp
Source/WebCore/platform/audio/AudioBus.h
Source/WebCore/platform/audio/AudioDSPKernel.h
Source/WebCore/platform/audio/AudioDSPKernelProcessor.cpp
Source/WebCore/platform/audio/AudioDSPKernelProcessor.h
Source/WebCore/platform/audio/AudioDestination.h
Source/WebCore/platform/audio/AudioFileReader.h
Source/WebCore/platform/audio/AudioProcessor.h
Source/WebCore/platform/audio/AudioUtilities.cpp
Source/WebCore/platform/audio/AudioUtilities.h
Source/WebCore/platform/audio/DynamicsCompressor.cpp
Source/WebCore/platform/audio/DynamicsCompressor.h
Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp
Source/WebCore/platform/audio/DynamicsCompressorKernel.h
Source/WebCore/platform/audio/EqualPowerPanner.cpp
Source/WebCore/platform/audio/EqualPowerPanner.h
Source/WebCore/platform/audio/HRTFDatabase.cpp
Source/WebCore/platform/audio/HRTFDatabase.h
Source/WebCore/platform/audio/HRTFDatabaseLoader.cpp
Source/WebCore/platform/audio/HRTFDatabaseLoader.h
Source/WebCore/platform/audio/HRTFElevation.cpp
Source/WebCore/platform/audio/HRTFElevation.h
Source/WebCore/platform/audio/HRTFKernel.cpp
Source/WebCore/platform/audio/HRTFKernel.h
Source/WebCore/platform/audio/HRTFPanner.cpp
Source/WebCore/platform/audio/HRTFPanner.h
Source/WebCore/platform/audio/Panner.cpp
Source/WebCore/platform/audio/Panner.h
Source/WebCore/platform/audio/chromium/AudioBusChromium.cpp
Source/WebCore/platform/audio/mac/AudioBusMac.mm
Source/WebCore/platform/audio/mac/AudioDestinationMac.cpp
Source/WebCore/platform/audio/mac/AudioDestinationMac.h
Source/WebCore/platform/audio/mac/AudioFileReaderMac.cpp
Source/WebCore/platform/audio/mac/AudioFileReaderMac.h
Source/WebCore/webaudio/AsyncAudioDecoder.cpp
Source/WebCore/webaudio/AsyncAudioDecoder.h
Source/WebCore/webaudio/AudioBasicProcessorNode.cpp
Source/WebCore/webaudio/AudioBasicProcessorNode.h
Source/WebCore/webaudio/AudioBuffer.cpp
Source/WebCore/webaudio/AudioBuffer.h
Source/WebCore/webaudio/AudioBufferSourceNode.cpp
Source/WebCore/webaudio/AudioBufferSourceNode.h
Source/WebCore/webaudio/AudioChannelMerger.cpp
Source/WebCore/webaudio/AudioChannelMerger.h
Source/WebCore/webaudio/AudioChannelSplitter.cpp
Source/WebCore/webaudio/AudioChannelSplitter.h
Source/WebCore/webaudio/AudioContext.cpp
Source/WebCore/webaudio/AudioContext.h
Source/WebCore/webaudio/AudioDestinationNode.cpp
Source/WebCore/webaudio/AudioDestinationNode.h
Source/WebCore/webaudio/AudioGainNode.cpp
Source/WebCore/webaudio/AudioGainNode.h
Source/WebCore/webaudio/AudioListener.cpp
Source/WebCore/webaudio/AudioListener.h
Source/WebCore/webaudio/AudioNode.cpp
Source/WebCore/webaudio/AudioNode.h
Source/WebCore/webaudio/AudioPannerNode.cpp
Source/WebCore/webaudio/AudioPannerNode.h
Source/WebCore/webaudio/AudioParam.cpp
Source/WebCore/webaudio/AudioParamTimeline.cpp
Source/WebCore/webaudio/AudioSourceNode.h
Source/WebCore/webaudio/BiquadFilterNode.cpp
Source/WebCore/webaudio/BiquadFilterNode.h
Source/WebCore/webaudio/BiquadProcessor.cpp
Source/WebCore/webaudio/BiquadProcessor.h
Source/WebCore/webaudio/ConvolverNode.cpp
Source/WebCore/webaudio/ConvolverNode.h
Source/WebCore/webaudio/DefaultAudioDestinationNode.cpp
Source/WebCore/webaudio/DefaultAudioDestinationNode.h
Source/WebCore/webaudio/DelayDSPKernel.cpp
Source/WebCore/webaudio/DelayDSPKernel.h
Source/WebCore/webaudio/DelayNode.cpp
Source/WebCore/webaudio/DelayNode.h
Source/WebCore/webaudio/DelayProcessor.cpp
Source/WebCore/webaudio/DelayProcessor.h
Source/WebCore/webaudio/DynamicsCompressorNode.cpp
Source/WebCore/webaudio/DynamicsCompressorNode.h
Source/WebCore/webaudio/HighPass2FilterNode.cpp
Source/WebCore/webaudio/HighPass2FilterNode.h
Source/WebCore/webaudio/JavaScriptAudioNode.cpp
Source/WebCore/webaudio/JavaScriptAudioNode.h
Source/WebCore/webaudio/LowPass2FilterNode.cpp
Source/WebCore/webaudio/LowPass2FilterNode.h
Source/WebCore/webaudio/OfflineAudioDestinationNode.h
Source/WebCore/webaudio/RealtimeAnalyserNode.cpp
Source/WebCore/webaudio/RealtimeAnalyserNode.h
Source/WebCore/webaudio/WaveShaperDSPKernel.cpp
Source/WebCore/webaudio/WaveShaperProcessor.cpp
Source/WebCore/webaudio/WaveShaperProcessor.h
Source/WebKit/chromium/ChangeLog
Source/WebKit/chromium/src/AudioDestinationChromium.cpp
Source/WebKit/chromium/src/AudioDestinationChromium.h

index e89d7400776c13132dc4ebd8568d913150bc7b72..3b2bd91c176ac9c822f239116c612003076b4b47 100644 (file)
@@ -1,3 +1,228 @@
+2011-10-05  Jer Noble  <jer.noble@apple.com>
+
+        WEB_AUDIO does not compile on Leopard 32-bit.
+        https://bugs.webkit.org/show_bug.cgi?id=69292
+
+        Reviewed by Simon Fraser.
+
+        No new tests; covered by all existing audio tests.
+
+        Use of float and double within the WEB_AUDIO implementation have been harmonized, with most
+        calculations done using floats, with narrowPrecisionToFloat() added when necessary to
+        narrow double results down to floats, and with float constants initialized with float values:
+        * platform/audio/AudioBus.cpp:
+        (WebCore::AudioBus::AudioBus):
+        (WebCore::AudioBus::createByMixingToMono):
+        * platform/audio/AudioBus.h:
+        (WebCore::AudioBus::sampleRate):
+        (WebCore::AudioBus::setSampleRate):
+        * platform/audio/AudioDSPKernel.h:
+        (WebCore::AudioDSPKernel::AudioDSPKernel):
+        (WebCore::AudioDSPKernel::sampleRate):
+        * platform/audio/AudioDSPKernelProcessor.cpp:
+        (WebCore::AudioDSPKernelProcessor::AudioDSPKernelProcessor):
+        * platform/audio/AudioDSPKernelProcessor.h:
+        * platform/audio/AudioDestination.h:
+        * platform/audio/AudioFileReader.h:
+        * platform/audio/AudioProcessor.h:
+        (WebCore::AudioProcessor::AudioProcessor):
+        (WebCore::AudioProcessor::sampleRate):
+        * platform/audio/AudioUtilities.cpp:
+        (WebCore::AudioUtilities::decibelsToLinear):
+        (WebCore::AudioUtilities::linearToDecibels):
+        (WebCore::AudioUtilities::discreteTimeConstantForSampleRate):
+        * platform/audio/AudioUtilities.h:
+        * platform/audio/DynamicsCompressor.cpp:
+        (WebCore::DynamicsCompressor::DynamicsCompressor):
+        (WebCore::DynamicsCompressor::initializeParameters):
+        (WebCore::DynamicsCompressor::parameterValue):
+        (WebCore::DynamicsCompressor::setEmphasisStageParameters):
+        (WebCore::DynamicsCompressor::process):
+        * platform/audio/DynamicsCompressor.h:
+        (WebCore::DynamicsCompressor::sampleRate):
+        (WebCore::DynamicsCompressor::nyquist):
+        * platform/audio/DynamicsCompressorKernel.cpp:
+        (WebCore::saturate):
+        (WebCore::DynamicsCompressorKernel::DynamicsCompressorKernel):
+        (WebCore::DynamicsCompressorKernel::process):
+        * platform/audio/DynamicsCompressorKernel.h:
+        * platform/audio/EqualPowerPanner.cpp:
+        (WebCore::EqualPowerPanner::EqualPowerPanner):
+        * platform/audio/EqualPowerPanner.h:
+        * platform/audio/HRTFDatabase.cpp:
+        (WebCore::HRTFDatabase::create):
+        (WebCore::HRTFDatabase::HRTFDatabase):
+        * platform/audio/HRTFDatabase.h:
+        (WebCore::HRTFDatabase::sampleRate):
+        * platform/audio/HRTFDatabaseLoader.cpp:
+        (WebCore::HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary):
+        (WebCore::HRTFDatabaseLoader::HRTFDatabaseLoader):
+        * platform/audio/HRTFDatabaseLoader.h:
+        (WebCore::HRTFDatabaseLoader::databaseSampleRate):
+        * platform/audio/HRTFElevation.cpp:
+        (WebCore::HRTFElevation::calculateSymmetricKernelsForAzimuthElevation):
+        (WebCore::HRTFElevation::calculateKernelsForAzimuthElevation):
+        (WebCore::HRTFElevation::createForSubject):
+        (WebCore::HRTFElevation::createByInterpolatingSlices):
+        * platform/audio/HRTFElevation.h:
+        (WebCore::HRTFElevation::sampleRate):
+        (WebCore::HRTFElevation::HRTFElevation):
+        * platform/audio/HRTFKernel.cpp:
+        (WebCore::extractAverageGroupDelay):
+        (WebCore::HRTFKernel::HRTFKernel):
+        (WebCore::HRTFKernel::createInterpolatedKernel):
+        * platform/audio/HRTFKernel.h:
+        (WebCore::HRTFKernel::create):
+        (WebCore::HRTFKernel::frameDelay):
+        (WebCore::HRTFKernel::sampleRate):
+        (WebCore::HRTFKernel::HRTFKernel):
+        * platform/audio/HRTFPanner.cpp:
+        (WebCore::HRTFPanner::HRTFPanner):
+        (WebCore::HRTFPanner::fftSizeForSampleRate):
+        * platform/audio/HRTFPanner.h:
+        (WebCore::HRTFPanner::sampleRate):
+        * platform/audio/Panner.cpp:
+        (WebCore::Panner::create):
+        * platform/audio/Panner.h:
+        * platform/audio/chromium/AudioBusChromium.cpp:
+        (WebCore::AudioBus::loadPlatformResource):
+        * platform/audio/mac/AudioBusMac.mm:
+        (WebCore::AudioBus::loadPlatformResource):
+        * platform/audio/mac/AudioDestinationMac.cpp:
+        (WebCore::AudioDestination::create):
+        (WebCore::AudioDestination::hardwareSampleRate):
+        (WebCore::AudioDestinationMac::AudioDestinationMac):
+        * platform/audio/mac/AudioDestinationMac.h:
+        (WebCore::AudioDestinationMac::sampleRate):
+        * platform/audio/mac/AudioFileReaderMac.cpp:
+        (WebCore::AudioFileReader::createBus):
+        (WebCore::createBusFromAudioFile):
+        (WebCore::createBusFromInMemoryAudioFile):
+        * platform/audio/mac/AudioFileReaderMac.h:
+        * webaudio/AsyncAudioDecoder.cpp:
+        (WebCore::AsyncAudioDecoder::decodeAsync):
+        (WebCore::AsyncAudioDecoder::DecodingTask::create):
+        (WebCore::AsyncAudioDecoder::DecodingTask::DecodingTask):
+        * webaudio/AsyncAudioDecoder.h:
+        (WebCore::AsyncAudioDecoder::DecodingTask::sampleRate):
+        * webaudio/AudioBasicProcessorNode.cpp:
+        (WebCore::AudioBasicProcessorNode::AudioBasicProcessorNode):
+        * webaudio/AudioBasicProcessorNode.h:
+        * webaudio/AudioBuffer.cpp:
+        (WebCore::AudioBuffer::create):
+        (WebCore::AudioBuffer::createFromAudioFileData):
+        (WebCore::AudioBuffer::AudioBuffer):
+        * webaudio/AudioBuffer.h:
+        (WebCore::AudioBuffer::sampleRate):
+        * webaudio/AudioBufferSourceNode.cpp:
+        (WebCore::AudioBufferSourceNode::create):
+        (WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
+        (WebCore::AudioBufferSourceNode::process):
+        (WebCore::AudioBufferSourceNode::renderFromBuffer):
+        * webaudio/AudioBufferSourceNode.h:
+        * webaudio/AudioChannelMerger.cpp:
+        (WebCore::AudioChannelMerger::AudioChannelMerger):
+        * webaudio/AudioChannelMerger.h:
+        (WebCore::AudioChannelMerger::create):
+        * webaudio/AudioChannelSplitter.cpp:
+        (WebCore::AudioChannelSplitter::AudioChannelSplitter):
+        * webaudio/AudioChannelSplitter.h:
+        (WebCore::AudioChannelSplitter::create):
+        * webaudio/AudioContext.cpp:
+        (WebCore::AudioContext::createOfflineContext):
+        (WebCore::AudioContext::AudioContext):
+        (WebCore::AudioContext::createBuffer):
+        * webaudio/AudioContext.h:
+        (WebCore::AudioContext::sampleRate):
+        * webaudio/AudioDestinationNode.cpp:
+        (WebCore::AudioDestinationNode::AudioDestinationNode):
+        * webaudio/AudioDestinationNode.h:
+        * webaudio/AudioGainNode.cpp:
+        (WebCore::AudioGainNode::AudioGainNode):
+        * webaudio/AudioGainNode.h:
+        (WebCore::AudioGainNode::create):
+        * webaudio/AudioListener.cpp:
+        (WebCore::AudioListener::AudioListener):
+        * webaudio/AudioListener.h:
+        (WebCore::AudioListener::setPosition):
+        (WebCore::AudioListener::setOrientation):
+        (WebCore::AudioListener::setVelocity):
+        * webaudio/AudioNode.cpp:
+        (WebCore::AudioNode::AudioNode):
+        * webaudio/AudioNode.h:
+        (WebCore::AudioNode::sampleRate):
+        * webaudio/AudioPannerNode.cpp:
+        (WebCore::AudioPannerNode::AudioPannerNode):
+        (WebCore::AudioPannerNode::getAzimuthElevation):
+        * webaudio/AudioPannerNode.h:
+        (WebCore::AudioPannerNode::create):
+        * webaudio/AudioParam.cpp:
+        (WebCore::AudioParam::value):
+        (WebCore::AudioParam::smoothedValue):
+        (WebCore::AudioParam::smooth):
+        (WebCore::AudioParam::calculateSampleAccurateValues):
+        * webaudio/AudioParamTimeline.cpp:
+        (WebCore::AudioParamTimeline::valueForContextTime):
+        (WebCore::timeToSampleFrame):
+        (WebCore::AudioParamTimeline::valuesForTimeRangeImpl):
+        * webaudio/AudioSourceNode.h:
+        (WebCore::AudioSourceNode::AudioSourceNode):
+        * webaudio/BiquadFilterNode.cpp:
+        (WebCore::BiquadFilterNode::BiquadFilterNode):
+        * webaudio/BiquadFilterNode.h:
+        (WebCore::BiquadFilterNode::create):
+        * webaudio/BiquadProcessor.cpp:
+        (WebCore::BiquadProcessor::BiquadProcessor):
+        * webaudio/BiquadProcessor.h:
+        * webaudio/ConvolverNode.cpp:
+        (WebCore::ConvolverNode::ConvolverNode):
+        * webaudio/ConvolverNode.h:
+        (WebCore::ConvolverNode::create):
+        * webaudio/DefaultAudioDestinationNode.cpp:
+        (WebCore::DefaultAudioDestinationNode::initialize):
+        * webaudio/DefaultAudioDestinationNode.h:
+        (WebCore::DefaultAudioDestinationNode::sampleRate):
+        * webaudio/DelayDSPKernel.cpp:
+        (WebCore::DelayDSPKernel::DelayDSPKernel):
+        (WebCore::DelayDSPKernel::process):
+        * webaudio/DelayDSPKernel.h:
+        * webaudio/DelayNode.cpp:
+        (WebCore::DelayNode::DelayNode):
+        * webaudio/DelayNode.h:
+        (WebCore::DelayNode::create):
+        * webaudio/DelayProcessor.cpp:
+        (WebCore::DelayProcessor::DelayProcessor):
+        * webaudio/DelayProcessor.h:
+        * webaudio/DynamicsCompressorNode.cpp:
+        (WebCore::DynamicsCompressorNode::DynamicsCompressorNode):
+        * webaudio/DynamicsCompressorNode.h:
+        (WebCore::DynamicsCompressorNode::create):
+        * webaudio/HighPass2FilterNode.cpp:
+        (WebCore::HighPass2FilterNode::HighPass2FilterNode):
+        * webaudio/HighPass2FilterNode.h:
+        (WebCore::HighPass2FilterNode::create):
+        * webaudio/JavaScriptAudioNode.cpp:
+        (WebCore::JavaScriptAudioNode::create):
+        (WebCore::JavaScriptAudioNode::JavaScriptAudioNode):
+        (WebCore::JavaScriptAudioNode::initialize):
+        * webaudio/JavaScriptAudioNode.h:
+        * webaudio/LowPass2FilterNode.cpp:
+        (WebCore::LowPass2FilterNode::LowPass2FilterNode):
+        * webaudio/LowPass2FilterNode.h:
+        (WebCore::LowPass2FilterNode::create):
+        * webaudio/OfflineAudioDestinationNode.h:
+        (WebCore::OfflineAudioDestinationNode::sampleRate):
+        * webaudio/RealtimeAnalyserNode.cpp:
+        (WebCore::RealtimeAnalyserNode::RealtimeAnalyserNode):
+        * webaudio/RealtimeAnalyserNode.h:
+        (WebCore::RealtimeAnalyserNode::create):
+        * webaudio/WaveShaperDSPKernel.cpp:
+        (WebCore::WaveShaperDSPKernel::process):
+        * webaudio/WaveShaperProcessor.cpp:
+        (WebCore::WaveShaperProcessor::WaveShaperProcessor):
+        * webaudio/WaveShaperProcessor.h:
+
+
 2011-10-05  Alexey Proskuryakov  <ap@apple.com>
 
         [Mac] Make built-in PDF description localizable
index e34f7dbd5e70c25ccdc75311a8927533797b77eb..ad29af1aad237237091f55936227b931fe999cc8 100644 (file)
@@ -48,9 +48,9 @@ using namespace VectorMath;
     
 AudioBus::AudioBus(unsigned numberOfChannels, size_t length, bool allocate)
     : m_length(length)
-    , m_busGain(1.0)
+    , m_busGain(1)
     , m_isFirstTime(true)
-    , m_sampleRate(0.0)
+    , m_sampleRate(0)
 {
     m_channels.reserveInitialCapacity(numberOfChannels);
 
@@ -466,7 +466,7 @@ PassOwnPtr<AudioBus> AudioBus::createByMixingToMono(AudioBus* sourceBus)
         
             // Do the mono mixdown.
             for (unsigned i = 0; i < n; ++i)
-                destination[i] = 0.5 * (sourceL[i] + sourceR[i]);
+                destination[i] = (sourceL[i] + sourceR[i]) / 2;
 
             destinationBus->setSampleRate(sourceBus->sampleRate());    
             return destinationBus.release();
index a87f55d3132ec3a956e2e946fb64f1a045791e75..53fb4b969773cda45a0330a5d85f19011c2c4888 100644 (file)
@@ -76,8 +76,8 @@ public:
     size_t length() const { return m_length; }
 
     // Sample-rate : 0.0 if unknown or "don't care"
-    double sampleRate() const { return m_sampleRate; }
-    void setSampleRate(double sampleRate) { m_sampleRate = sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
+    void setSampleRate(float sampleRate) { m_sampleRate = sampleRate; }
 
     // Zeroes all channels.
     void zero();
@@ -133,7 +133,7 @@ public:
     // Makes maximum absolute value == 1.0 (if possible).
     void normalize();
 
-    static PassOwnPtr<AudioBus> loadPlatformResource(const char* name, double sampleRate);
+    static PassOwnPtr<AudioBus> loadPlatformResource(const char* name, float sampleRate);
 
 protected:
     AudioBus() { };
@@ -149,7 +149,7 @@ protected:
 
     double m_busGain;
     bool m_isFirstTime;
-    double m_sampleRate; // 0.0 if unknown or N/A
+    float m_sampleRate; // 0.0 if unknown or N/A
 };
 
 } // WebCore
index d0719c5659727a4e6d6506986d4ce7f1c92ffb27..f33c9ede4d3c0a1363632e5c084ed0ebdab522c0 100644 (file)
@@ -45,7 +45,7 @@ public:
     {
     }
 
-    AudioDSPKernel(double sampleRate)
+    AudioDSPKernel(float sampleRate)
         : m_kernelProcessor(0)
         , m_sampleRate(sampleRate)
     {
@@ -57,7 +57,7 @@ public:
     virtual void process(const float* source, float* destination, size_t framesToProcess) = 0;
     virtual void reset() = 0;
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
     double nyquist() const { return 0.5 * sampleRate(); }
 
     AudioDSPKernelProcessor* processor() { return m_kernelProcessor; }
@@ -65,7 +65,7 @@ public:
 
 protected:
     AudioDSPKernelProcessor* m_kernelProcessor;
-    double m_sampleRate;
+    float m_sampleRate;
 };
 
 } // namespace WebCore
index 45068db421db59f6e8fc676565149d57f3fb454a..cf4d2d35175a53893e15cef503f0bf9e36a0ac83 100644 (file)
@@ -39,7 +39,7 @@
 namespace WebCore {
 
 // setNumberOfChannels() may later be called if the object is not yet in an "initialized" state.
-AudioDSPKernelProcessor::AudioDSPKernelProcessor(double sampleRate, unsigned numberOfChannels)
+AudioDSPKernelProcessor::AudioDSPKernelProcessor(float sampleRate, unsigned numberOfChannels)
     : AudioProcessor(sampleRate)
     , m_numberOfChannels(numberOfChannels)
     , m_hasJustReset(true)
index e87a81026a9d4cac2a8e3e336f45dea83eed677a..40b5ab8d67383e2dc897cf753f649c1c0037b3b0 100644 (file)
@@ -50,7 +50,7 @@ class AudioProcessor;
 class AudioDSPKernelProcessor : public AudioProcessor {
 public:
     // numberOfChannels may be later changed if object is not yet in an "initialized" state
-    AudioDSPKernelProcessor(double sampleRate, unsigned numberOfChannels);
+    AudioDSPKernelProcessor(float sampleRate, unsigned numberOfChannels);
 
     // Subclasses create the appropriate type of processing kernel here.
     // We'll call this to create a kernel for each channel.
index 9498110371398f0b55bde89716d850f38a8de687..75882c0a635ff3f8774c529c7404f1ded1d53c33 100644 (file)
@@ -41,7 +41,7 @@ class AudioSourceProvider;
 
 class AudioDestination {
 public:
-    static PassOwnPtr<AudioDestination> create(AudioSourceProvider&, double sampleRate);
+    static PassOwnPtr<AudioDestination> create(AudioSourceProvider&, float sampleRate);
 
     virtual ~AudioDestination() { }
 
@@ -50,8 +50,8 @@ public:
     virtual bool isPlaying() = 0;
 
     // Sample-rate conversion may happen in AudioDestination to the hardware sample-rate
-    virtual double sampleRate() const = 0;
-    static double hardwareSampleRate();
+    virtual float sampleRate() const = 0;
+    static float hardwareSampleRate();
 };
 
 } // namespace WebCore
index 3c024907a1bc1fb753f84ffc854b0a88b55f2adf..0c8dc6e4993bddd03de4ae97b1256fb6dd4053d3 100644 (file)
@@ -42,9 +42,9 @@ class AudioBus;
 // sampleRate will be made (if it doesn't already match the file's sample-rate).
 // The created buffer will have its sample-rate set correctly to the result.
 
-PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, double sampleRate);
+PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate);
 
-PassOwnPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, double sampleRate);
+PassOwnPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, float sampleRate);
                                 
 // May pass in 0.0 for sampleRate in which case it will use the AudioBus's sampleRate                               
 void writeBusToAudioFile(AudioBus* bus, const char* filePath, double fileSampleRate);
index 69ba40f804a8656f9481beade4fbd4abd19ca186..2d7b60a7ce60eb0665e2d288fbc949666ffdcbcf 100644 (file)
@@ -41,7 +41,7 @@ class AudioBus;
 
 class AudioProcessor {
 public:
-    AudioProcessor(double sampleRate)
+    AudioProcessor(float sampleRate)
         : m_initialized(false)
         , m_sampleRate(sampleRate)
     {
@@ -63,11 +63,11 @@ public:
 
     bool isInitialized() const { return m_initialized; }
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
 
 protected:
     bool m_initialized;
-    double m_sampleRate;
+    float m_sampleRate;
 };
 
 } // namespace WebCore
index 7a4b32e59189301fd18ba18e9d97ea01603b9cde..1a02b7e10df2050c7fab159f16db7c7d0f41afde 100644 (file)
@@ -33,27 +33,27 @@ namespace WebCore {
 
 namespace AudioUtilities {
 
-double decibelsToLinear(double decibels)
+float decibelsToLinear(float decibels)
 {
-    return pow(10.0, 0.05 * decibels);
+    return powf(10, 0.05f * decibels);
 }
 
-double linearToDecibels(double linear)
+float linearToDecibels(float linear)
 {
     // It's not possible to calculate decibels for a zero linear value since it would be -Inf.
     // -1000.0 dB represents a very tiny linear value in case we ever reach this case.
     ASSERT(linear);
     if (!linear)
-        return -1000.0;
+        return -1000;
         
-    return 20.0 * log10(linear);
+    return 20 * log10f(linear);
 }
 
-double discreteTimeConstantForSampleRate(double timeConstant, double sampleRate)
+float discreteTimeConstantForSampleRate(float timeConstant, float sampleRate)
 {
     // hardcoded value is temporary build fix for Windows.
     // FIXME: replace hardcode 2.718282 with M_E until the correct MathExtras.h solution is determined.
-    return 1.0 - pow(1.0 / 2.718282, 1.0 / (sampleRate * timeConstant));
+    return 1 - powf(1 / 2.718282f, 1 / (sampleRate * timeConstant));
 }
     
 } // AudioUtilites
index 7cf44cef2b4d3ad22d84a0243bbd174e64c5d227..c98a4c8de8bf11ffb203f5e65fa1ef062b03bfc7 100644 (file)
@@ -30,13 +30,13 @@ namespace WebCore {
 namespace AudioUtilities {
 
 // Standard functions for converting to and from decibel values from linear.
-double linearToDecibels(double);
-double decibelsToLinear(double);
+float linearToDecibels(float);
+float decibelsToLinear(float);
 
 // timeConstant is the time it takes a first-order linear time-invariant system
 // to reach the value 1 - 1/e (around 63.2%) given a step input response.
 // discreteTimeConstantForSampleRate() will return the discrete time-constant for the specific sampleRate.
-double discreteTimeConstantForSampleRate(double timeConstant, double sampleRate);
+float discreteTimeConstantForSampleRate(float timeConstant, float sampleRate);
     
 } // AudioUtilites
 
index 15eec9f40aaf6d0221aa5e6b311552e858d71c36..c4795f4f81ebfc8ed9bf76a598414e7899d82edd 100644 (file)
@@ -40,7 +40,7 @@ namespace WebCore {
 
 using namespace AudioUtilities;
     
-DynamicsCompressor::DynamicsCompressor(bool isStereo, double sampleRate)
+DynamicsCompressor::DynamicsCompressor(bool isStereo, float sampleRate)
     : m_isStereo(isStereo)
     , m_sampleRate(sampleRate)
     , m_compressor(sampleRate)
@@ -59,17 +59,17 @@ void DynamicsCompressor::initializeParameters()
     
     m_parameters[ParamThreshold] = -24; // dB
     m_parameters[ParamHeadroom] = 21; // dB
-    m_parameters[ParamAttack] = 0.003; // seconds
-    m_parameters[ParamRelease] = 0.250; // seconds
-    m_parameters[ParamPreDelay] = 0.006; // seconds
+    m_parameters[ParamAttack] = 0.003f; // seconds
+    m_parameters[ParamRelease] = 0.250f; // seconds
+    m_parameters[ParamPreDelay] = 0.006f; // seconds
 
     // Release zone values 0 -> 1.
-    m_parameters[ParamReleaseZone1] = 0.09;
-    m_parameters[ParamReleaseZone2] = 0.16;
-    m_parameters[ParamReleaseZone3] = 0.42;
-    m_parameters[ParamReleaseZone4] = 0.98;
+    m_parameters[ParamReleaseZone1] = 0.09f;
+    m_parameters[ParamReleaseZone2] = 0.16f;
+    m_parameters[ParamReleaseZone3] = 0.42f;
+    m_parameters[ParamReleaseZone4] = 0.98f;
 
-    m_parameters[ParamFilterStageGain] = 4.4; // dB
+    m_parameters[ParamFilterStageGain] = 4.4f; // dB
     m_parameters[ParamFilterStageRatio] = 2;
     m_parameters[ParamFilterAnchor] = 15000 / nyquist();
     
@@ -79,7 +79,7 @@ void DynamicsCompressor::initializeParameters()
     m_parameters[ParamEffectBlend] = 1;
 }
 
-double DynamicsCompressor::parameterValue(unsigned parameterID)
+float DynamicsCompressor::parameterValue(unsigned parameterID)
 {
     ASSERT(parameterID < ParamLast);
     return m_parameters[parameterID];
@@ -90,8 +90,8 @@ void DynamicsCompressor::setEmphasisStageParameters(unsigned stageIndex, float g
     float gk = 1 - gain / 20;
     float f1 = normalizedFrequency * gk;
     float f2 = normalizedFrequency / gk;
-    float r1 = exp(-f1 * piDouble);
-    float r2 = exp(-f2 * piDouble);
+    float r1 = expf(-f1 * piFloat);
+    float r2 = expf(-f2 * piFloat);
 
     // Set pre-filter zero and pole to create an emphasis filter.
     m_preFilter[stageIndex].setZero(r1);
@@ -170,10 +170,10 @@ void DynamicsCompressor::process(AudioBus* sourceBus, AudioBus* destinationBus,
     // 1 mixes in only the compressed signal.
     float effectBlend = parameterValue(ParamEffectBlend);
 
-    double releaseZone1 = parameterValue(ParamReleaseZone1);
-    double releaseZone2 = parameterValue(ParamReleaseZone2);
-    double releaseZone3 = parameterValue(ParamReleaseZone3);
-    double releaseZone4 = parameterValue(ParamReleaseZone4);
+    float releaseZone1 = parameterValue(ParamReleaseZone1);
+    float releaseZone2 = parameterValue(ParamReleaseZone2);
+    float releaseZone3 = parameterValue(ParamReleaseZone3);
+    float releaseZone4 = parameterValue(ParamReleaseZone4);
 
     // Apply compression to the pre-filtered signal.
     // The processing is performed in place.
index d0c036f29aeec4176cc543cc8c15d21b841950af..21529512def190466ceca85ca81c3d876eb00fa6 100644 (file)
@@ -62,26 +62,26 @@ public:
         ParamLast
     };
 
-    DynamicsCompressor(bool isStereo, double sampleRate);
+    DynamicsCompressor(bool isStereo, float sampleRate);
 
     void process(AudioBus* sourceBus, AudioBus* destinationBus, unsigned framesToProcess);
     void reset();
 
-    double parameterValue(unsigned parameterID);
+    float parameterValue(unsigned parameterID);
 
     bool isStereo() const { return m_isStereo; }
-    double sampleRate() const { return m_sampleRate; }
-    double nyquist() const { return 0.5 * m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
+    float nyquist() const { return m_sampleRate / 2; }
 
 protected:
     // m_parameters holds the tweakable compressor parameters.
     // FIXME: expose some of the most important ones (such as threshold, attack, release)
     // as DynamicsCompressorNode attributes.
-    double m_parameters[ParamLast];
+    float m_parameters[ParamLast];
     void initializeParameters();
 
     bool m_isStereo;
-    double m_sampleRate;
+    float m_sampleRate;
 
     // Emphasis filter controls.
     float m_lastFilterStageRatio;
index a0723461bb21c53ca716fd6fe7b6c57d487c3bbf..2229c3815b5bfd290ae1ff15d3554f5dfb30703e 100644 (file)
@@ -43,15 +43,15 @@ namespace WebCore {
 using namespace AudioUtilities;
 
 // Metering hits peaks instantly, but releases this fast (in seconds).
-const double meteringReleaseTimeConstant = 0.325;
+const float meteringReleaseTimeConstant = 0.325f;
     
 // Exponential saturation curve.
-static double saturate(double x, double k)
+static float saturate(float x, float k)
 {
-    return 1 - exp(-k * x);
+    return 1 - expf(-k * x);
 }
 
-DynamicsCompressorKernel::DynamicsCompressorKernel(double sampleRate)
+DynamicsCompressorKernel::DynamicsCompressorKernel(float sampleRate)
     : m_sampleRate(sampleRate)
     , m_lastPreDelayFrames(DefaultPreDelayFrames)
     , m_preDelayBufferL(MaxPreDelayFrames)
@@ -108,18 +108,18 @@ void DynamicsCompressorKernel::process(float* sourceL,
     float wetMix = effectBlend;
 
     // Threshold and headroom.
-    double linearThreshold = decibelsToLinear(dbThreshold);
-    double linearHeadroom = decibelsToLinear(dbHeadroom);
+    float linearThreshold = decibelsToLinear(dbThreshold);
+    float linearHeadroom = decibelsToLinear(dbHeadroom);
 
     // Makeup gain.
-    double maximum = 1.05 * linearHeadroom * linearThreshold;
-    double kk = (maximum - linearThreshold);
-    double inverseKK = 1 / kk;
+    float maximum = 1.05f * linearHeadroom * linearThreshold;
+    float kk = (maximum - linearThreshold);
+    float inverseKK = 1 / kk;
 
-    double fullRangeGain = (linearThreshold + kk * saturate(1 - linearThreshold, 1));
-    double fullRangeMakeupGain = 1 / fullRangeGain;
+    float fullRangeGain = (linearThreshold + kk * saturate(1 - linearThreshold, 1));
+    float fullRangeMakeupGain = 1 / fullRangeGain;
     // Empirical/perceptual tuning.
-    fullRangeMakeupGain = pow(fullRangeMakeupGain, 0.6);
+    fullRangeMakeupGain = powf(fullRangeMakeupGain, 0.6f);
 
     float masterLinearGain = decibelsToLinear(dbPostGain) * fullRangeMakeupGain;
 
@@ -131,26 +131,26 @@ void DynamicsCompressorKernel::process(float* sourceL,
     float releaseFrames = sampleRate * releaseTime;
     
     // Detector release time.
-    double satReleaseTime = 0.0025;
-    double satReleaseFrames = satReleaseTime * sampleRate;
+    float satReleaseTime = 0.0025f;
+    float satReleaseFrames = satReleaseTime * sampleRate;
 
     // Create a smooth function which passes through four points.
 
     // Polynomial of the form
     // y = a + b*x + c*x^2 + d*x^3 + e*x^4;
 
-    double y1 = releaseFrames * releaseZone1;
-    double y2 = releaseFrames * releaseZone2;
-    double y3 = releaseFrames * releaseZone3;
-    double y4 = releaseFrames * releaseZone4;
+    float y1 = releaseFrames * releaseZone1;
+    float y2 = releaseFrames * releaseZone2;
+    float y3 = releaseFrames * releaseZone3;
+    float y4 = releaseFrames * releaseZone4;
 
     // All of these coefficients were derived for 4th order polynomial curve fitting where the y values
     // match the evenly spaced x values as follows: (y1 : x == 0, y2 : x == 1, y3 : x == 2, y4 : x == 3)
-    double kA = 0.9999999999999998*y1 + 1.8432219684323923e-16*y2 - 1.9373394351676423e-16*y3 + 8.824516011816245e-18*y4;
-    double kB = -1.5788320352845888*y1 + 2.3305837032074286*y2 - 0.9141194204840429*y3 + 0.1623677525612032*y4;
-    double kC = 0.5334142869106424*y1 - 1.272736789213631*y2 + 0.9258856042207512*y3 - 0.18656310191776226*y4;
-    double kD = 0.08783463138207234*y1 - 0.1694162967925622*y2 + 0.08588057951595272*y3 - 0.00429891410546283*y4;
-    double kE = -0.042416883008123074*y1 + 0.1115693827987602*y2 - 0.09764676325265872*y3 + 0.028494263462021576*y4;
+    float kA = 0.9999999999999998f*y1 + 1.8432219684323923e-16f*y2 - 1.9373394351676423e-16f*y3 + 8.824516011816245e-18f*y4;
+    float kB = -1.5788320352845888f*y1 + 2.3305837032074286f*y2 - 0.9141194204840429f*y3 + 0.1623677525612032f*y4;
+    float kC = 0.5334142869106424f*y1 - 1.272736789213631f*y2 + 0.9258856042207512f*y3 - 0.18656310191776226f*y4;
+    float kD = 0.08783463138207234f*y1 - 0.1694162967925622f*y2 + 0.08588057951595272f*y3 - 0.00429891410546283f*y4;
+    float kE = -0.042416883008123074f*y1 + 0.1115693827987602f*y2 - 0.09764676325265872f*y3 + 0.028494263462021576f*y4;
 
     // x ranges from 0 -> 3       0    1    2   3
     //                           -15  -10  -5   0db
@@ -177,7 +177,7 @@ void DynamicsCompressorKernel::process(float* sourceL,
         float desiredGain = m_detectorAverage;
 
         // Pre-warp so we get desiredGain after sin() warp below.
-        double scaledDesiredGain = asin(desiredGain) / (0.5 * piDouble);
+        float scaledDesiredGain = asinf(desiredGain) / (0.5f * piFloat);
 
         // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
         // Deal with envelopes
@@ -190,7 +190,7 @@ void DynamicsCompressorKernel::process(float* sourceL,
         bool isReleasing = scaledDesiredGain > m_compressorGain;
 
         // compressionDiffDb is the difference between current compression level and the desired level.
-        double compressionDiffDb = linearToDecibels(m_compressorGain / scaledDesiredGain);
+        float compressionDiffDb = linearToDecibels(m_compressorGain / scaledDesiredGain);
 
         if (isReleasing) {
             // Release mode - compressionDiffDb should be negative dB
@@ -205,20 +205,20 @@ void DynamicsCompressorKernel::process(float* sourceL,
             // Adaptive release - higher compression (lower compressionDiffDb)  releases faster.
 
             // Contain within range: -12 -> 0 then scale to go from 0 -> 3
-            double x = compressionDiffDb;
-            x = max(-12., x);
-            x = min(0., x);
-            x = 0.25 * (x + 12);
+            float x = compressionDiffDb;
+            x = max(-12.0f, x);
+            x = min(0.0f, x);
+            x = 0.25f * (x + 12);
 
             // Compute adaptive release curve using 4th order polynomial.
             // Normal values for the polynomial coefficients would create a monotonically increasing function.
-            double x2 = x * x;
-            double x3 = x2 * x;
-            double x4 = x2 * x2;
-            double releaseFrames = kA + kB * x + kC * x2 + kD * x3 + kE * x4;
+            float x2 = x * x;
+            float x3 = x2 * x;
+            float x4 = x2 * x2;
+            float releaseFrames = kA + kB * x + kC * x2 + kD * x3 + kE * x4;
 
 #define kSpacingDb 5
-            double dbPerFrame = kSpacingDb / releaseFrames;
+            float dbPerFrame = kSpacingDb / releaseFrames;
 
             envelopeRate = decibelsToLinear(dbPerFrame);
         } else {
@@ -235,10 +235,10 @@ void DynamicsCompressorKernel::process(float* sourceL,
             if (m_maxAttackCompressionDiffDb == -1 || m_maxAttackCompressionDiffDb < compressionDiffDb)
                 m_maxAttackCompressionDiffDb = compressionDiffDb;
 
-            double effAttenDiffDb = max(0.5f, m_maxAttackCompressionDiffDb);
+            float effAttenDiffDb = max(0.5f, m_maxAttackCompressionDiffDb);
 
-            double x = 0.25 / effAttenDiffDb;
-            envelopeRate = 1 - pow(x, double(1 / attackFrames));
+            float x = 0.25f / effAttenDiffDb;
+            envelopeRate = 1 - powf(x, 1 / attackFrames);
         }
 
         // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -264,7 +264,7 @@ void DynamicsCompressorKernel::process(float* sourceL,
                     float undelayedL = *sourceL++;
                     float undelayedR = *sourceR++;
 
-                    compressorInput = 0.5 * (undelayedL + undelayedR);
+                    compressorInput = 0.5f * (undelayedL + undelayedR);
 
                     inputL = delayBufferL[preDelayReadIndex];
                     inputR = delayBufferR[preDelayReadIndex];
@@ -284,24 +284,24 @@ void DynamicsCompressorKernel::process(float* sourceL,
                 // Calculate shaped power on undelayed input.
 
                 float scaledInput = compressorInput;
-                double absInput = scaledInput > 0 ? scaledInput : -scaledInput;
+                float absInput = scaledInput > 0 ? scaledInput : -scaledInput;
 
                 // Put through shaping curve.
                 // This is linear up to the threshold, then exponentially approaches the maximum (headroom amount above threshold).
                 // The transition from the threshold to the exponential portion is smooth (1st derivative matched).
-                double shapedInput = absInput < linearThreshold ? absInput : linearThreshold + kk * saturate(absInput - linearThreshold, inverseKK);
+                float shapedInput = absInput < linearThreshold ? absInput : linearThreshold + kk * saturate(absInput - linearThreshold, inverseKK);
 
-                double attenuation = absInput <= 0.0001 ? 1 : shapedInput / absInput;
+                float attenuation = absInput <= 0.0001f ? 1 : shapedInput / absInput;
 
-                double attenuationDb = -linearToDecibels(attenuation);
-                attenuationDb = max(2., attenuationDb);
+                float attenuationDb = -linearToDecibels(attenuation);
+                attenuationDb = max(2.0f, attenuationDb);
 
-                double dbPerFrame = attenuationDb / satReleaseFrames;
+                float dbPerFrame = attenuationDb / satReleaseFrames;
 
-                double satReleaseRate = decibelsToLinear(dbPerFrame) - 1;
+                float satReleaseRate = decibelsToLinear(dbPerFrame) - 1;
 
                 bool isRelease = (attenuation > detectorAverage);
-                double rate = isRelease ? satReleaseRate : 1;
+                float rate = isRelease ? satReleaseRate : 1;
 
                 detectorAverage += (attenuation - detectorAverage) * rate;
                 detectorAverage = min(1.0f, detectorAverage);
@@ -323,13 +323,13 @@ void DynamicsCompressorKernel::process(float* sourceL,
                 }
 
                 // Warp pre-compression gain to smooth out sharp exponential transition points.
-                double postWarpCompressorGain = sin(0.5 * piDouble * compressorGain);
+                float postWarpCompressorGain = sinf(0.5f * piFloat * compressorGain);
 
                 // Calculate total gain using master gain and effect blend.
-                double totalGain = dryMix + wetMix * masterLinearGain * postWarpCompressorGain;
+                float totalGain = dryMix + wetMix * masterLinearGain * postWarpCompressorGain;
 
                 // Calculate metering.
-                double dbRealGain = 20 * log10(postWarpCompressorGain);
+                float dbRealGain = 20 * log10f(postWarpCompressorGain);
                 if (dbRealGain < m_meteringGain)
                     m_meteringGain = dbRealGain;
                 else
index a6e70d350f12027d07806e8119c1f32a35bf651b..8e5f7099d88bb55ae78d478cd796dcf4a196d218 100644 (file)
@@ -35,7 +35,7 @@ namespace WebCore {
 
 class DynamicsCompressorKernel {
 public:
-    DynamicsCompressorKernel(double sampleRate);
+    DynamicsCompressorKernel(float sampleRate);
 
     // Performs stereo-linked compression.
     void process(float *sourceL,
index e2c2a5c46ace1714774385b05610849564188c1f..13a19d6cd4451d7b2f9882b50c4ed9e00320b3b6 100644 (file)
 #include <wtf/MathExtras.h>
 
 // Use a 50ms smoothing / de-zippering time-constant.
-const double SmoothingTimeConstant = 0.050;
+const float SmoothingTimeConstant = 0.050f;
 
 using namespace std;
 
 namespace WebCore {
 
-EqualPowerPanner::EqualPowerPanner(double sampleRate)
+EqualPowerPanner::EqualPowerPanner(float sampleRate)
     : Panner(PanningModelEqualPower)
     , m_isFirstRender(true)
     , m_gainL(0.0)
index f20617eb47cc354430efc1673ca49113cebfb1f5..4f6001df91420ec544afcedcdf6897e84480d750 100644 (file)
@@ -33,7 +33,7 @@ namespace WebCore {
 
 class EqualPowerPanner : public Panner {
 public:
-    EqualPowerPanner(double sampleRate);
+    EqualPowerPanner(float sampleRate);
 
     virtual void pan(double azimuth, double elevation, AudioBus* inputBus, AudioBus* outputBuf, size_t framesToProcess);
 
index ef1229f695e1582d394206293a1a0d37215fc59e..85586980326f5207abbb0dd4232cfa03085cf927 100644 (file)
@@ -45,13 +45,13 @@ const unsigned HRTFDatabase::NumberOfRawElevations = 10; // -45 -> +90 (each 15
 const unsigned HRTFDatabase::InterpolationFactor = 1;
 const unsigned HRTFDatabase::NumberOfTotalElevations = NumberOfRawElevations * InterpolationFactor;
 
-PassOwnPtr<HRTFDatabase> HRTFDatabase::create(double sampleRate)
+PassOwnPtr<HRTFDatabase> HRTFDatabase::create(float sampleRate)
 {
     OwnPtr<HRTFDatabase> hrtfDatabase = adoptPtr(new HRTFDatabase(sampleRate));
     return hrtfDatabase.release();
 }
 
-HRTFDatabase::HRTFDatabase(double sampleRate)
+HRTFDatabase::HRTFDatabase(float sampleRate)
     : m_elevations(NumberOfTotalElevations)
     , m_sampleRate(sampleRate)
 {
@@ -75,7 +75,7 @@ HRTFDatabase::HRTFDatabase(double sampleRate)
 
             // Create the interpolated convolution kernels and delays.
             for (unsigned jj = 1; jj < InterpolationFactor; ++jj) {
-                double x = static_cast<double>(jj) / static_cast<double>(InterpolationFactor);
+                float x = static_cast<float>(jj) / static_cast<float>(InterpolationFactor);
                 m_elevations[i + jj] = HRTFElevation::createByInterpolatingSlices(m_elevations[i].get(), m_elevations[j].get(), x, sampleRate);
                 ASSERT(m_elevations[i + jj].get());
             }
index bf13a3a2230956abcd6b9eed5172926d75a71a18..bb7cf530f5a1c6f0a270b6aa273a543e2ada1643 100644 (file)
@@ -46,7 +46,7 @@ class HRTFKernel;
 class HRTFDatabase {
     WTF_MAKE_NONCOPYABLE(HRTFDatabase);
 public:
-    static PassOwnPtr<HRTFDatabase> create(double sampleRate);
+    static PassOwnPtr<HRTFDatabase> create(float sampleRate);
 
     // getKernelsFromAzimuthElevation() returns a left and right ear kernel, and an interpolated left and right frame delay for the given azimuth and elevation.
     // azimuthBlend must be in the range 0 -> 1.
@@ -57,10 +57,10 @@ public:
     // Returns the number of different azimuth angles.
     static unsigned numberOfAzimuths() { return HRTFElevation::NumberOfTotalAzimuths; }
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
     
 private:
-    explicit HRTFDatabase(double sampleRate);
+    explicit HRTFDatabase(float sampleRate);
 
     // Minimum and maximum elevation angles (inclusive) for a HRTFDatabase.
     static const int MinElevation;
@@ -80,7 +80,7 @@ private:
     static unsigned indexFromElevationAngle(double);
 
     Vector<OwnPtr<HRTFElevation> > m_elevations;                                            
-    double m_sampleRate;
+    float m_sampleRate;
 };
 
 } // namespace WebCore
index 9acdc3258fa86ccfbc7886e8dde95996a877dd23..cb7355aa8fa3ad15c81b7ca43e01d4eec4380cc3 100644 (file)
@@ -40,7 +40,7 @@ namespace WebCore {
 // Singleton
 HRTFDatabaseLoader* HRTFDatabaseLoader::s_loader = 0;
 
-PassRefPtr<HRTFDatabaseLoader> HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(double sampleRate)
+PassRefPtr<HRTFDatabaseLoader> HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(float sampleRate)
 {
     ASSERT(isMainThread());
 
@@ -59,7 +59,7 @@ PassRefPtr<HRTFDatabaseLoader> HRTFDatabaseLoader::createAndLoadAsynchronouslyIf
     return loader;
 }
 
-HRTFDatabaseLoader::HRTFDatabaseLoader(double sampleRate)
+HRTFDatabaseLoader::HRTFDatabaseLoader(float sampleRate)
     : m_databaseLoaderThread(0)
     , m_databaseSampleRate(sampleRate)
 {
index ee7eb223c79b421035efb9931a347ad7111c5475..e67707e814a1dac7cbef5bce68e10743079cea84 100644 (file)
@@ -44,7 +44,7 @@ public:
     // Lazily creates the singleton HRTFDatabaseLoader (if not already created) and starts loading asynchronously (when created the first time).
     // Returns the singleton HRTFDatabaseLoader.
     // Must be called from the main thread.
-    static PassRefPtr<HRTFDatabaseLoader> createAndLoadAsynchronouslyIfNecessary(double sampleRate);
+    static PassRefPtr<HRTFDatabaseLoader> createAndLoadAsynchronouslyIfNecessary(float sampleRate);
 
     // Returns the singleton HRTFDatabaseLoader.
     static HRTFDatabaseLoader* loader() { return s_loader; }
@@ -60,7 +60,7 @@ public:
     
     HRTFDatabase* database() { return m_hrtfDatabase.get(); }
 
-    double databaseSampleRate() const { return m_databaseSampleRate; }
+    float databaseSampleRate() const { return m_databaseSampleRate; }
     
     // Called in asynchronous loading thread.
     void load();
@@ -72,7 +72,7 @@ public:
 
 private:
     // Both constructor and destructor must be called from the main thread.
-    explicit HRTFDatabaseLoader(double sampleRate);    
+    explicit HRTFDatabaseLoader(float sampleRate);
     
     // If it hasn't already been loaded, creates a new thread and initiates asynchronous loading of the default database.
     // This must be called from the main thread.
@@ -85,7 +85,7 @@ private:
     Mutex m_threadLock;
     ThreadIdentifier m_databaseLoaderThread;
 
-    double m_databaseSampleRate;    
+    float m_databaseSampleRate;
 };
 
 } // namespace WebCore
index b0db8623f963a0e77c09a8f54143e4a7db061730..cf21255490265eb8944dc21273191ab12b05b261 100644 (file)
@@ -52,7 +52,7 @@ const unsigned HRTFElevation::NumberOfTotalAzimuths = NumberOfRawAzimuths * Inte
 
 // Takes advantage of the symmetry and creates a composite version of the two measured versions.  For example, we have both azimuth 30 and -30 degrees
 // where the roles of left and right ears are reversed with respect to each other.
-bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, double sampleRate, const String& subjectName,
+bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
                                                                  RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
 {
     RefPtr<HRTFKernel> kernelL1;
@@ -71,13 +71,13 @@ bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, in
         return false;
         
     // Notice L/R reversal in symmetric version.
-    kernelL = HRTFKernel::createInterpolatedKernel(kernelL1.get(), kernelR2.get(), 0.5);
-    kernelR = HRTFKernel::createInterpolatedKernel(kernelR1.get(), kernelL2.get(), 0.5);
+    kernelL = HRTFKernel::createInterpolatedKernel(kernelL1.get(), kernelR2.get(), 0.5f);
+    kernelR = HRTFKernel::createInterpolatedKernel(kernelR1.get(), kernelL2.get(), 0.5f);
     
     return true;
 }
 
-bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevation, double sampleRate, const String& subjectName,
+bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
                                                         RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
 {
     // Valid values for azimuth are 0 -> 345 in 15 degree increments.
@@ -158,7 +158,7 @@ static int maxElevations[] = {
     45 //  345 
 };
 
-PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, double sampleRate)
+PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, float sampleRate)
 {
     bool isElevationGood = elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
     ASSERT(isElevationGood);
@@ -188,7 +188,7 @@ PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectN
 
         // Create the interpolated convolution kernels and delays.
         for (unsigned jj = 1; jj < InterpolationFactor; ++jj) {
-            double x = double(jj) / double(InterpolationFactor); // interpolate from 0 -> 1
+            float x = float(jj) / float(InterpolationFactor); // interpolate from 0 -> 1
 
             (*kernelListL)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListL->at(i).get(), kernelListL->at(j).get(), x);
             (*kernelListR)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListR->at(i).get(), kernelListR->at(j).get(), x);
@@ -199,7 +199,7 @@ PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectN
     return hrtfElevation.release();
 }
 
-PassOwnPtr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, double x, double sampleRate)
+PassOwnPtr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate)
 {
     ASSERT(hrtfElevation1 && hrtfElevation2);
     if (!hrtfElevation1 || !hrtfElevation2)
index 24b7822886deda109544cf072b11340fd29148bb..ccff097f8ad6cf11b52c8977e0f8af33e079d22b 100644 (file)
@@ -50,10 +50,10 @@ public:
     // Normally, there will only be a single HRTF database set, but this API supports the possibility of multiple ones with different names.
     // Interpolated azimuths will be generated based on InterpolationFactor.
     // Valid values for elevation are -45 -> +90 in 15 degree increments.
-    static PassOwnPtr<HRTFElevation> createForSubject(const String& subjectName, int elevation, double sampleRate);
+    static PassOwnPtr<HRTFElevation> createForSubject(const String& subjectName, int elevation, float sampleRate);
 
     // Given two HRTFElevations, and an interpolation factor x: 0 -> 1, returns an interpolated HRTFElevation.
-    static PassOwnPtr<HRTFElevation> createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, double x, double sampleRate);
+    static PassOwnPtr<HRTFElevation> createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate);
 
     // Returns the list of left or right ear HRTFKernels for all the azimuths going from 0 to 360 degrees.
     HRTFKernelList* kernelListL() { return m_kernelListL.get(); }
@@ -61,7 +61,7 @@ public:
 
     double elevationAngle() const { return m_elevationAngle; }
     unsigned numberOfAzimuths() { return NumberOfTotalAzimuths; }
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
     
     // Returns the left and right kernels for the given azimuth index.
     // The interpolated delays based on azimuthBlend: 0 -> 1 are returned in frameDelayL and frameDelayR.
@@ -83,16 +83,16 @@ public:
     // Valid values for azimuth are 0 -> 345 in 15 degree increments.
     // Valid values for elevation are -45 -> +90 in 15 degree increments.
     // Returns true on success.
-    static bool calculateKernelsForAzimuthElevation(int azimuth, int elevation, double sampleRate, const String& subjectName,
+    static bool calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
                                                     RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR);
 
     // Given a specific azimuth and elevation angle, returns the left and right HRTFKernel in kernelL and kernelR.
     // This method averages the measured response using symmetry of azimuth (for example by averaging the -30.0 and +30.0 azimuth responses).
     // Returns true on success.
-    static bool calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, double sampleRate, const String& subjectName,
+    static bool calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
                                                              RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR);
 private:
-    HRTFElevation(PassOwnPtr<HRTFKernelList> kernelListL, PassOwnPtr<HRTFKernelList> kernelListR, int elevation, double sampleRate)
+    HRTFElevation(PassOwnPtr<HRTFKernelList> kernelListL, PassOwnPtr<HRTFKernelList> kernelListR, int elevation, float sampleRate)
         : m_kernelListL(kernelListL)
         , m_kernelListR(kernelListR)
         , m_elevationAngle(elevation)
@@ -103,7 +103,7 @@ private:
     OwnPtr<HRTFKernelList> m_kernelListL;
     OwnPtr<HRTFKernelList> m_kernelListR;
     double m_elevationAngle;
-    double m_sampleRate;
+    float m_sampleRate;
 };
 
 } // namespace WebCore
index 9db35ba65d647f6f34fab2e96da43304991b362f..68eccb9693151d8b027b775810cfedc4283415da 100644 (file)
@@ -35,6 +35,7 @@
 #include "AudioChannel.h"
 #include "Biquad.h"
 #include "FFTFrame.h"
+#include "FloatConversion.h"
 #include <wtf/MathExtras.h>
 
 using namespace std;
@@ -45,7 +46,7 @@ namespace WebCore {
 // This represents the initial delay before the most energetic part of the impulse response.
 // The sample-frame delay is removed from the impulseP impulse response, and this value  is returned.
 // the length of the passed in AudioChannel must be a power of 2.
-static double extractAverageGroupDelay(AudioChannel* channel, size_t analysisFFTSize)
+static float extractAverageGroupDelay(AudioChannel* channel, size_t analysisFFTSize)
 {
     ASSERT(channel);
         
@@ -59,14 +60,14 @@ static double extractAverageGroupDelay(AudioChannel* channel, size_t analysisFFT
     FFTFrame estimationFrame(analysisFFTSize);
     estimationFrame.doFFT(impulseP);
 
-    double frameDelay = estimationFrame.extractAverageGroupDelay();
+    float frameDelay = narrowPrecisionToFloat(estimationFrame.extractAverageGroupDelay());
     estimationFrame.doInverseFFT(impulseP);
 
     return frameDelay;
 }
 
-HRTFKernel::HRTFKernel(AudioChannel* channel, size_t fftSize, double sampleRate, bool bassBoost)
-    : m_frameDelay(0.0)
+HRTFKernel::HRTFKernel(AudioChannel* channel, size_t fftSize, float sampleRate, bool bassBoost)
+    : m_frameDelay(0)
     , m_sampleRate(sampleRate)
 {
     ASSERT(channel);
@@ -116,22 +117,22 @@ PassOwnPtr<AudioChannel> HRTFKernel::createImpulseResponse()
 }
 
 // Interpolates two kernels with x: 0 -> 1 and returns the result.
-PassRefPtr<HRTFKernel> HRTFKernel::createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, double x)
+PassRefPtr<HRTFKernel> HRTFKernel::createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, float x)
 {
     ASSERT(kernel1 && kernel2);
     if (!kernel1 || !kernel2)
         return 0;
  
     ASSERT(x >= 0.0 && x < 1.0);
-    x = min(1.0, max(0.0, x));
+    x = min(1.0f, max(0.0f, x));
     
-    double sampleRate1 = kernel1->sampleRate();
-    double sampleRate2 = kernel2->sampleRate();
+    float sampleRate1 = kernel1->sampleRate();
+    float sampleRate2 = kernel2->sampleRate();
     ASSERT(sampleRate1 == sampleRate2);
     if (sampleRate1 != sampleRate2)
         return 0;
     
-    double frameDelay = (1.0 - x) * kernel1->frameDelay() + x * kernel2->frameDelay();
+    float frameDelay = (1 - x) * kernel1->frameDelay() + x * kernel2->frameDelay();
     
     OwnPtr<FFTFrame> interpolatedFrame = FFTFrame::createInterpolatedFrame(*kernel1->fftFrame(), *kernel2->fftFrame(), x);
     return HRTFKernel::create(interpolatedFrame.release(), frameDelay, sampleRate1);
index 572a08511d8b1d252748d6e41c91381c6d651f9e..3a8dbfde2f327da31756352f95079f1d9aaad75f 100644 (file)
@@ -51,25 +51,25 @@ class HRTFKernel : public RefCounted<HRTFKernel> {
 public:
     // Note: this is destructive on the passed in AudioChannel.
     // The length of channel must be a power of two.
-    static PassRefPtr<HRTFKernel> create(AudioChannel* channel, size_t fftSize, double sampleRate, bool bassBoost)
+    static PassRefPtr<HRTFKernel> create(AudioChannel* channel, size_t fftSize, float sampleRate, bool bassBoost)
     {
         return adoptRef(new HRTFKernel(channel, fftSize, sampleRate, bassBoost));
     }
 
-    static PassRefPtr<HRTFKernel> create(PassOwnPtr<FFTFrame> fftFrame, double frameDelay, double sampleRate)
+    static PassRefPtr<HRTFKernel> create(PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
     {
         return adoptRef(new HRTFKernel(fftFrame, frameDelay, sampleRate));
     }
 
     // Given two HRTFKernels, and an interpolation factor x: 0 -> 1, returns an interpolated HRTFKernel.
-    static PassRefPtr<HRTFKernel> createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, double x);
+    static PassRefPtr<HRTFKernel> createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, float x);
   
     FFTFrame* fftFrame() { return m_fftFrame.get(); }
     
     size_t fftSize() const { return m_fftFrame->fftSize(); }
-    double frameDelay() const { return m_frameDelay; }
+    float frameDelay() const { return m_frameDelay; }
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
     double nyquist() const { return 0.5 * sampleRate(); }
 
     // Converts back into impulse-response form.
@@ -77,9 +77,9 @@ public:
 
 private:
     // Note: this is destructive on the passed in AudioChannel.
-    HRTFKernel(AudioChannel* channel, size_t fftSize, double sampleRate, bool bassBoost);
+    HRTFKernel(AudioChannel*, size_t fftSize, float sampleRate, bool bassBoost);
     
-    HRTFKernel(PassOwnPtr<FFTFrame> fftFrame, double frameDelay, double sampleRate)
+    HRTFKernel(PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
         : m_fftFrame(fftFrame)
         , m_frameDelay(frameDelay)
         , m_sampleRate(sampleRate)
@@ -87,8 +87,8 @@ private:
     }
     
     OwnPtr<FFTFrame> m_fftFrame;
-    double m_frameDelay;
-    double m_sampleRate;
+    float m_frameDelay;
+    float m_sampleRate;
 };
 
 typedef Vector<RefPtr<HRTFKernel> > HRTFKernelList;
index 68bc505af90e0c967be5e63b093baf3d2b5c13c7..5377eb270d36fd832beb4d8aafb02cc339f98d95 100644 (file)
@@ -44,7 +44,7 @@ namespace WebCore {
 // We ASSERT the delay values used in process() with this value.
 const double MaxDelayTimeSeconds = 0.002;
 
-HRTFPanner::HRTFPanner(double sampleRate)
+HRTFPanner::HRTFPanner(float sampleRate)
     : Panner(PanningModelHRTF)
     , m_sampleRate(sampleRate)
     , m_isFirstRender(true)
@@ -60,7 +60,7 @@ HRTFPanner::~HRTFPanner()
 {
 }
 
-size_t HRTFPanner::fftSizeForSampleRate(double sampleRate)
+size_t HRTFPanner::fftSizeForSampleRate(float sampleRate)
 {
     // The HRTF impulse responses (loaded as audio resources) are 512 sample-frames @44.1KHz.
     // Currently, we truncate the impulse responses to half this size, but an FFT-size of twice impulse response size is needed (for convolution).
index 6c13d48b599d1372df85ec0aa440e1e0612f0fd4..e771ba28b3e4aaded6f802156463227a6ffe1281 100644 (file)
@@ -33,7 +33,7 @@ namespace WebCore {
 
 class HRTFPanner : public Panner {
 public:
-    explicit HRTFPanner(double sampleRate);
+    explicit HRTFPanner(float sampleRate);
     virtual ~HRTFPanner();
 
     // Panner
@@ -41,16 +41,16 @@ public:
     virtual void reset();
 
     size_t fftSize() { return fftSizeForSampleRate(m_sampleRate); }
-    static size_t fftSizeForSampleRate(double sampleRate);
+    static size_t fftSizeForSampleRate(float sampleRate);
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
     
 private:
     // Given an azimuth angle in the range -180 -> +180, returns the corresponding azimuth index for the database,
     // and azimuthBlend which is an interpolation value from 0 -> 1.
     int calculateDesiredAzimuthIndexAndBlend(double azimuth, double& azimuthBlend);
 
-    double m_sampleRate;
+    float m_sampleRate;
     
     // m_isFirstRender and m_azimuthIndex are used to avoid harshly changing from rendering at one azimuth angle to another angle very far away.
     // Changing the azimuth gradually produces a smoother sound.
index d3d1a2aecaeffea6cbe145cc1e6cad4cdc5869df..debc9e3216d70a9467b331f4461efbc14d51ddfd 100644 (file)
@@ -38,7 +38,7 @@
 
 namespace WebCore {
 
-PassOwnPtr<Panner> Panner::create(PanningModel model, double sampleRate)
+PassOwnPtr<Panner> Panner::create(PanningModel model, float sampleRate)
 {
     OwnPtr<Panner> panner;
 
index b57ceda2403b22fee61ea1bfb2c2d85d56daa510..4b728327d29ae8b3c3d839f5d9f0e1af76db4d66 100644 (file)
@@ -47,7 +47,7 @@ public:
     
     typedef unsigned PanningModel;
 
-    static PassOwnPtr<Panner> create(PanningModel model, double sampleRate);
+    static PassOwnPtr<Panner> create(PanningModel, float sampleRate);
 
     virtual ~Panner() { };
 
index d5c55260677a4d1682332ed4a0af65a70d8d1a4d..f2a009641164a75a470aede07b5ea755b83b36e0 100644 (file)
@@ -34,7 +34,7 @@
 
 namespace WebCore {
 
-PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, double sampleRate)
+PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, float sampleRate)
 {
     // FIXME: the sampleRate parameter is ignored. It should be removed from the API.
     OwnPtr<AudioBus> audioBus = PlatformSupport::loadPlatformAudioResource(name, sampleRate);
@@ -48,7 +48,7 @@ PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, double sam
     return AudioBus::createBySampleRateConverting(audioBus.get(), false, sampleRate);
 }
 
-PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, double sampleRate)
+PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
 {
     // FIXME: the sampleRate parameter is ignored. It should be removed from the API.
     OwnPtr<AudioBus> audioBus = PlatformSupport::decodeAudioFileData(static_cast<const char*>(data), dataSize, sampleRate);
index 6e6e27ab767cb1f04d5dbec60891b5b72cd02685..42a718b215aa9d404dc18d95a05b49097a44bb8b 100644 (file)
@@ -41,7 +41,7 @@
 
 namespace WebCore {
 
-PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, double sampleRate)
+PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, float sampleRate)
 {
     // This method can be called from other than the main thread, so we need an auto-release pool.
     NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
index d4ecabab21eb4134631ffb318840a0501e082d08..8bf8cc43745b35dc4d37141f3b93a9b15c3fe527 100644 (file)
@@ -33,6 +33,7 @@
 #include "AudioDestinationMac.h"
 
 #include "AudioSourceProvider.h"
+#include "FloatConversion.h"
 #include <CoreAudio/AudioHardware.h>
 
 namespace WebCore {
@@ -40,12 +41,12 @@ namespace WebCore {
 const int kBufferSize = 128;
 
 // Factory method: Mac-implementation
-PassOwnPtr<AudioDestination> AudioDestination::create(AudioSourceProvider& provider, double sampleRate)
+PassOwnPtr<AudioDestination> AudioDestination::create(AudioSourceProvider& provider, float sampleRate)
 {
     return adoptPtr(new AudioDestinationMac(provider, sampleRate));
 }
 
-double AudioDestination::hardwareSampleRate()
+float AudioDestination::hardwareSampleRate()
 {
     // Determine the default output device's sample-rate.
     AudioDeviceID deviceID = kAudioDeviceUnknown;
@@ -54,7 +55,7 @@ double AudioDestination::hardwareSampleRate()
     AudioObjectPropertyAddress defaultOutputDeviceAddress = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
     OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &defaultOutputDeviceAddress, 0, 0, &infoSize, (void*)&deviceID);
     if (result)
-        return 0.0; // error
+        return 0; // error
 
     Float64 nominalSampleRate;
     infoSize = sizeof(Float64);
@@ -62,12 +63,12 @@ double AudioDestination::hardwareSampleRate()
     AudioObjectPropertyAddress nominalSampleRateAddress = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
     result = AudioObjectGetPropertyData(deviceID, &nominalSampleRateAddress, 0, 0, &infoSize, (void*)&nominalSampleRate);
     if (result)
-        return 0.0; // error
+        return 0; // error
 
-    return nominalSampleRate;
+    return narrowPrecisionToFloat(nominalSampleRate);
 }
 
-AudioDestinationMac::AudioDestinationMac(AudioSourceProvider& provider, double sampleRate)
+AudioDestinationMac::AudioDestinationMac(AudioSourceProvider& provider, float sampleRate)
     : m_outputUnit(0)
     , m_provider(provider)
     , m_renderBus(2, kBufferSize, false)
index 197440c684f76064c889ae01898d0190a7df0121..963f7c735515a650cc200f2baed6689771f50d23 100644 (file)
@@ -39,14 +39,14 @@ namespace WebCore {
 
 class AudioDestinationMac : public AudioDestination {
 public:
-    AudioDestinationMac(AudioSourceProvider&, double sampleRate);
+    AudioDestinationMac(AudioSourceProvider&, float sampleRate);
     virtual ~AudioDestinationMac();
 
     virtual void start();
     virtual void stop();
     bool isPlaying() { return m_isPlaying; }
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
 
 private:
     void configure();
@@ -60,7 +60,7 @@ private:
     AudioSourceProvider& m_provider;
     AudioBus m_renderBus;
 
-    double m_sampleRate;
+    float m_sampleRate;
     bool m_isPlaying;
 };
 
index 426995e13d274015f5ff546cb1f32a2e44e41765..7b666a55ab879e40f68c4dbfec77c378c1cac9d4 100644 (file)
@@ -34,6 +34,7 @@
 
 #include "AudioBus.h"
 #include "AudioFileReader.h"
+#include "FloatConversion.h"
 #include <CoreFoundation/CoreFoundation.h>
 #include <CoreServices/CoreServices.h>
 
@@ -135,7 +136,7 @@ SInt64 AudioFileReader::getSizeProc(void* clientData)
     return audioFileReader->dataSize();
 }
 
-PassOwnPtr<AudioBus> AudioFileReader::createBus(double sampleRate, bool mixToMono)
+PassOwnPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono)
 {
     if (!m_extAudioFileRef)
         return nullptr;
@@ -187,7 +188,7 @@ PassOwnPtr<AudioBus> AudioFileReader::createBus(double sampleRate, bool mixToMon
 
     // Create AudioBus where we'll put the PCM audio data
     OwnPtr<AudioBus> audioBus = adoptPtr(new AudioBus(busChannelCount, numberOfFrames));
-    audioBus->setSampleRate(m_clientDataFormat.mSampleRate); // save for later
+    audioBus->setSampleRate(narrowPrecisionToFloat(m_clientDataFormat.mSampleRate)); // save for later
 
     // Only allocated in the mixToMono case
     AudioFloatArray bufL;
@@ -241,13 +242,13 @@ PassOwnPtr<AudioBus> AudioFileReader::createBus(double sampleRate, bool mixToMon
     return audioBus.release();
 }
 
-PassOwnPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, double sampleRate)
+PassOwnPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, float sampleRate)
 {
     AudioFileReader reader(filePath);
     return reader.createBus(sampleRate, mixToMono);
 }
 
-PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, double sampleRate)
+PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
 {
     AudioFileReader reader(data, dataSize);
     return reader.createBus(sampleRate, mixToMono);
index d531266ca03f4bbc2274b1884027d2652f1dc856..2123431493e68abad7c6490050303cb5ec403876 100644 (file)
@@ -46,7 +46,7 @@ public:
     ~AudioFileReader();
 
     // Returns 0 if error
-    PassOwnPtr<AudioBus> createBus(double sampleRate, bool mixToMono);
+    PassOwnPtr<AudioBus> createBus(float sampleRate, bool mixToMono);
 
     const void* data() const { return m_data; }
     size_t dataSize() const { return m_dataSize; }
index a9a89021e18f7d8767f0672f26ff31a33c51fe1c..cd49ae0afda619373b244adbbd6e40d00964e0c5 100644 (file)
@@ -54,7 +54,7 @@ AsyncAudioDecoder::~AsyncAudioDecoder()
     m_threadID = 0;
 }
 
-void AsyncAudioDecoder::decodeAsync(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
+void AsyncAudioDecoder::decodeAsync(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
 {
     ASSERT(isMainThread());
     ASSERT(audioData);
@@ -91,12 +91,12 @@ void AsyncAudioDecoder::runLoop()
     }
 }
 
-PassOwnPtr<AsyncAudioDecoder::DecodingTask> AsyncAudioDecoder::DecodingTask::create(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
+PassOwnPtr<AsyncAudioDecoder::DecodingTask> AsyncAudioDecoder::DecodingTask::create(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
 {
     return adoptPtr(new DecodingTask(audioData, sampleRate, successCallback, errorCallback));
 }
 
-AsyncAudioDecoder::DecodingTask::DecodingTask(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
+AsyncAudioDecoder::DecodingTask::DecodingTask(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
     : m_audioData(audioData)
     , m_sampleRate(sampleRate)
     , m_successCallback(successCallback)
index f2cca8d0785ccea845cc22ad49d51dd418c9a9a6..f09542d39da4386d1e4a7a90aca4cabdb3c5996f 100644 (file)
@@ -47,21 +47,21 @@ public:
     ~AsyncAudioDecoder();
 
     // Must be called on the main thread.
-    void decodeAsync(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
+    void decodeAsync(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
 
 private:
     class DecodingTask {
         WTF_MAKE_NONCOPYABLE(DecodingTask);
     public:
-        static PassOwnPtr<DecodingTask> create(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
+        static PassOwnPtr<DecodingTask> create(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
 
         void decode();
         
     private:
-        DecodingTask(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
+        DecodingTask(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
 
         ArrayBuffer* audioData() { return m_audioData.get(); }
-        double sampleRate() const { return m_sampleRate; }
+        float sampleRate() const { return m_sampleRate; }
         AudioBufferCallback* successCallback() { return m_successCallback.get(); }
         AudioBufferCallback* errorCallback() { return m_errorCallback.get(); }
         AudioBuffer* audioBuffer() { return m_audioBuffer.get(); }
@@ -70,7 +70,7 @@ private:
         void notifyComplete();
 
         RefPtr<ArrayBuffer> m_audioData;
-        double m_sampleRate;
+        float m_sampleRate;
         RefPtr<AudioBufferCallback> m_successCallback;
         RefPtr<AudioBufferCallback> m_errorCallback;
         RefPtr<AudioBuffer> m_audioBuffer;
index 828062eeb7b2e5c08a331ee530738107201aa1d8..90142f27b2d68999ab6b1092393f206e9b5d5b2b 100644 (file)
@@ -36,7 +36,7 @@
 
 namespace WebCore {
 
-AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext* context, double sampleRate)
+AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     addInput(adoptPtr(new AudioNodeInput(this)));
index 38bfd3b54cea6b13f2da978efad230e49869335f..5a555dafba0ac984f1939e3ae6d2a47ecd3b9041 100644 (file)
@@ -39,7 +39,7 @@ class AudioProcessor;
 // AudioBasicProcessorNode is an AudioNode with one input and one output where the input and output have the same number of channels.
 class AudioBasicProcessorNode : public AudioNode {
 public:
-    AudioBasicProcessorNode(AudioContext*, double sampleRate);
+    AudioBasicProcessorNode(AudioContext*, float sampleRate);
 
     // AudioNode
     virtual void process(size_t framesToProcess);
index ff4f042878393dab55183a7ccde4fd31d4b47578..6e24d3b49a4ac58a4e771a5a82c0cfef9d50a0e2 100644 (file)
 
 namespace WebCore {
 
-PassRefPtr<AudioBuffer> AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
+PassRefPtr<AudioBuffer> AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
 {
     return adoptRef(new AudioBuffer(numberOfChannels, numberOfFrames, sampleRate));
 }
 
-PassRefPtr<AudioBuffer> AudioBuffer::createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, double sampleRate)
+PassRefPtr<AudioBuffer> AudioBuffer::createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
 {
     OwnPtr<AudioBus> bus = createBusFromInMemoryAudioFile(data, dataSize, mixToMono, sampleRate);
     if (bus.get())
@@ -53,7 +53,7 @@ PassRefPtr<AudioBuffer> AudioBuffer::createFromAudioFileData(const void* data, s
     return 0;
 }
 
-AudioBuffer::AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
+AudioBuffer::AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
     : m_gain(1.0)
     , m_sampleRate(sampleRate)
     , m_length(numberOfFrames)
index b11a20e052f7798ca7bf88e1dcf0a107d13fb09e..d5e9f2b4b4e1fea2483b2c7efb82920873f6f656 100644 (file)
@@ -41,15 +41,15 @@ class AudioBus;
     
 class AudioBuffer : public RefCounted<AudioBuffer> {
 public:   
-    static PassRefPtr<AudioBuffer> create(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
+    static PassRefPtr<AudioBuffer> create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
 
     // Returns 0 if data is not a valid audio file.
-    static PassRefPtr<AudioBuffer> createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, double sampleRate);
+    static PassRefPtr<AudioBuffer> createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate);
 
     // Format
     size_t length() const { return m_length; }
     double duration() const { return length() / sampleRate(); }
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
 
     // Channel data access
     unsigned numberOfChannels() const { return m_channels.size(); }
@@ -66,11 +66,11 @@ public:
     void releaseMemory();
     
 protected:
-    AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
+    AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
     AudioBuffer(AudioBus* bus);
 
     double m_gain; // scalar gain
-    double m_sampleRate;
+    float m_sampleRate;
     size_t m_length;
 
     Vector<RefPtr<Float32Array> > m_channels;
index 4a230e08230cd5bd3b605ef759c9e90b581a634e..b4b863f659bb325fb201059e8cb2a12c6dea9e5b 100644 (file)
@@ -31,6 +31,7 @@
 #include "AudioContext.h"
 #include "AudioNodeOutput.h"
 #include "Document.h"
+#include "FloatConversion.h"
 #include "ScriptCallStack.h"
 #include <algorithm>
 #include <wtf/MainThread.h>
@@ -48,12 +49,12 @@ const double UnknownTime = -1;
 // to minimize linear interpolation aliasing.
 const double MaxRate = 1024;
 
-PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, double sampleRate)
+PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, float sampleRate)
 {
     return adoptRef(new AudioBufferSourceNode(context, sampleRate));
 }
 
-AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, double sampleRate)
+AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, float sampleRate)
     : AudioSourceNode(context, sampleRate)
     , m_buffer(0)
     , m_isPlaying(false)
@@ -100,7 +101,7 @@ void AudioBufferSourceNode::process(size_t framesToProcess)
     // Careful - this is a tryLock() and not an autolocker, so we must unlock() before every return.
     if (m_processLock.tryLock()) {
         // Check if it's time to start playing.
-        double sampleRate = this->sampleRate();
+        float sampleRate = this->sampleRate();
         double quantumStartTime = context()->currentTime();
         double quantumEndTime = quantumStartTime + framesToProcess / sampleRate;
 
@@ -133,8 +134,8 @@ void AudioBufferSourceNode::process(size_t framesToProcess)
         // If the end time is somewhere in the middle of this time quantum, then simply zero out the
         // frames starting at the end time.
         if (m_endTime != UnknownTime && m_endTime >= quantumStartTime && m_endTime < quantumEndTime) {
-            unsigned zeroStartFrame = (m_endTime - quantumStartTime) * sampleRate;
-            unsigned framesToZero = framesToProcess - zeroStartFrame;
+            size_t zeroStartFrame = narrowPrecisionToFloat((m_endTime - quantumStartTime) * sampleRate);
+            size_t framesToZero = framesToProcess - zeroStartFrame;
 
             bool isSafe = zeroStartFrame < framesToProcess && framesToZero <= framesToProcess && zeroStartFrame + framesToZero <= framesToProcess;
             ASSERT(isSafe);
@@ -272,13 +273,13 @@ void AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
         double sampleL1 = sourceL[readIndex];
         double sampleL2 = sourceL[readIndex2];
         double sampleL = (1.0 - interpolationFactor) * sampleL1 + interpolationFactor * sampleL2;
-        *destinationL++ = sampleL;
+        *destinationL++ = narrowPrecisionToFloat(sampleL);
 
         if (isStereo) {
             double sampleR1 = sourceR[readIndex];
             double sampleR2 = sourceR[readIndex2];
             double sampleR = (1.0 - interpolationFactor) * sampleR1 + interpolationFactor * sampleR2;
-            *destinationR++ = sampleR;
+            *destinationR++ = narrowPrecisionToFloat(sampleR);
         }
 
         virtualReadIndex += pitchRate;
index c769c97ff009ee9eb3323ff8290ac673e7ba462e..fd73081d6b3f81b4e1fb7b2b3cd38bd800e783b0 100644 (file)
@@ -43,7 +43,7 @@ class AudioContext;
 
 class AudioBufferSourceNode : public AudioSourceNode {
 public:
-    static PassRefPtr<AudioBufferSourceNode> create(AudioContext*, double sampleRate);
+    static PassRefPtr<AudioBufferSourceNode> create(AudioContext*, float sampleRate);
 
     virtual ~AudioBufferSourceNode();
     
@@ -83,7 +83,7 @@ public:
     void setPannerNode(PassRefPtr<AudioPannerNode> pannerNode) { m_pannerNode = pannerNode; }
 
 private:
-    AudioBufferSourceNode(AudioContext*, double sampleRate);
+    AudioBufferSourceNode(AudioContext*, float sampleRate);
 
     void renderFromBuffer(AudioBus*, unsigned destinationFrameOffset, size_t numberOfFrames);
 
index 0f8cc0002fe1cd29d3c80754a799d97cb965ffca..51e7855868331a45f76553542d28c3f1a11d198d 100644 (file)
@@ -42,7 +42,7 @@ namespace WebCore {
 // It can easily be increased to support more if the web audio specification is updated.
 const unsigned NumberOfInputs = 6;
 
-AudioChannelMerger::AudioChannelMerger(AudioContext* context, double sampleRate)
+AudioChannelMerger::AudioChannelMerger(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     // Create a fixed number of inputs (able to handle the maximum number of channels we deal with).
index b7b718b4915a89ef7c6085d861edc7064e2cf4bf..e773daeb913f52d26554fa638f8cd60a20cffa0c 100644 (file)
@@ -38,7 +38,7 @@ class AudioContext;
     
 class AudioChannelMerger : public AudioNode {
 public:
-    static PassRefPtr<AudioChannelMerger> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<AudioChannelMerger> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new AudioChannelMerger(context, sampleRate));      
     }
@@ -51,7 +51,7 @@ public:
     virtual void checkNumberOfChannelsForInput(AudioNodeInput*);
 
 private:
-    AudioChannelMerger(AudioContext*, double sampleRate);
+    AudioChannelMerger(AudioContext*, float sampleRate);
 };
 
 } // namespace WebCore
index 7e90b7a93c6f4d285a514e6e4fcea25ec780ef7c..e5cc72a86b6a011475e3d6a83875f8c56a3f48be 100644 (file)
@@ -37,7 +37,7 @@ namespace WebCore {
 // It can easily be increased to support more if the web audio specification is updated.
 const unsigned NumberOfOutputs = 6;
 
-AudioChannelSplitter::AudioChannelSplitter(AudioContext* context, double sampleRate)
+AudioChannelSplitter::AudioChannelSplitter(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     addInput(adoptPtr(new AudioNodeInput(this)));
index 7dadac571c7440ffe1da7ae8554ba100553c6892..71b0ef415bfd45e63c3f017f77bdcc9a6406ccbf 100644 (file)
@@ -34,7 +34,7 @@ class AudioContext;
     
 class AudioChannelSplitter : public AudioNode {
 public:
-    static PassRefPtr<AudioChannelSplitter> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<AudioChannelSplitter> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new AudioChannelSplitter(context, sampleRate));      
     }
@@ -44,7 +44,7 @@ public:
     virtual void reset();
 
 private:
-    AudioChannelSplitter(AudioContext*, double sampleRate);
+    AudioChannelSplitter(AudioContext*, float sampleRate);
 };
 
 } // namespace WebCore
index 39e8f3df2a34a6f6d788ad25e222d8b9b3a809ff..3efffec91dc06d81e91ded1ce557b307ef13e2ed 100644 (file)
@@ -82,7 +82,7 @@ namespace WebCore {
     
 namespace {
     
-bool isSampleRateRangeGood(double sampleRate)
+bool isSampleRateRangeGood(float sampleRate)
 {
     return sampleRate >= 22050 && sampleRate <= 96000;
 }
@@ -103,7 +103,7 @@ PassRefPtr<AudioContext> AudioContext::create(Document* document)
     return adoptRef(new AudioContext(document));
 }
 
-PassRefPtr<AudioContext> AudioContext::createOfflineContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate, ExceptionCode& ec)
+PassRefPtr<AudioContext> AudioContext::createOfflineContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
 {
     ASSERT(document);
 
@@ -143,7 +143,7 @@ AudioContext::AudioContext(Document* document)
 }
 
 // Constructor for offline (non-realtime) rendering.
-AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
+AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
     : ActiveDOMObject(document, this)
     , m_isInitialized(false)
     , m_isAudioThreadFinished(false)
@@ -294,7 +294,7 @@ void AudioContext::refBuffer(PassRefPtr<AudioBuffer> buffer)
     m_allocatedBuffers.append(buffer);
 }
 
-PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
+PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
 {
     if (!isSampleRateRangeGood(sampleRate) || numberOfChannels > 10 || !numberOfFrames)
         return 0;
index 8f5c35b0a4e4096ae7a11ea49a35641b705053b3..fa73d70930885429f9a3eeb78c2fbdc3181ea1f5 100644 (file)
@@ -76,7 +76,7 @@ public:
     static PassRefPtr<AudioContext> create(Document*);
 
     // Create an AudioContext for offline (non-realtime) rendering.
-    static PassRefPtr<AudioContext> createOfflineContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate, ExceptionCode&);
+    static PassRefPtr<AudioContext> createOfflineContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
 
     virtual ~AudioContext();
 
@@ -95,9 +95,9 @@ public:
 
     AudioDestinationNode* destination() { return m_destinationNode.get(); }
     double currentTime() { return m_destinationNode->currentTime(); }
-    double sampleRate() { return m_destinationNode->sampleRate(); }
+    float sampleRate() { return m_destinationNode->sampleRate(); }
 
-    PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
+    PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
     PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono);
 
     // Asynchronous audio file data decoding.
@@ -226,7 +226,7 @@ public:
     
 private:
     AudioContext(Document*);
-    AudioContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
+    AudioContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
     void constructCommon();
 
     void lazyInitialize();
index a735ca10c09c63a29388207e0ab85e2d39b1c871..ff9ebbd32786497cccd90b59e012ddd211e34add 100644 (file)
@@ -36,7 +36,7 @@
 
 namespace WebCore {
     
-AudioDestinationNode::AudioDestinationNode(AudioContext* context, double sampleRate)
+AudioDestinationNode::AudioDestinationNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
     , m_currentTime(0.0)
 {
index 5913205f84b948d683334109520330bd0f6056c9..d7bc7bc12d79026eb5afb57f4efe72468b537d52 100644 (file)
@@ -36,7 +36,7 @@ class AudioContext;
     
 class AudioDestinationNode : public AudioNode, public AudioSourceProvider {
 public:
-    AudioDestinationNode(AudioContext*, double sampleRate);
+    AudioDestinationNode(AudioContext*, float sampleRate);
     virtual ~AudioDestinationNode();
     
     // AudioNode   
@@ -48,7 +48,7 @@ public:
 
     double currentTime() { return m_currentTime; }
 
-    virtual double sampleRate() const = 0;
+    virtual float sampleRate() const = 0;
 
     virtual unsigned numberOfChannels() const { return 2; } // FIXME: update when multi-channel (more than stereo) is supported
 
index 3678792d2674dd68b864c99a047602601dea3df6..2129c85d6918008885fe871728eacc518dd40428 100644 (file)
@@ -34,7 +34,7 @@
 
 namespace WebCore {
 
-AudioGainNode::AudioGainNode(AudioContext* context, double sampleRate)
+AudioGainNode::AudioGainNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
     , m_lastGain(1.0)
     , m_sampleAccurateGainValues(AudioNode::ProcessingSizeInFrames) // FIXME: can probably share temp buffer in context
index a0d3c2b208aa8cef3717d53f062f47b3f520b87e..e6342128c77994bbd48d20d6f187b28d579194e2 100644 (file)
@@ -39,7 +39,7 @@ class AudioContext;
 
 class AudioGainNode : public AudioNode {
 public:
-    static PassRefPtr<AudioGainNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<AudioGainNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new AudioGainNode(context, sampleRate));      
     }
@@ -55,7 +55,7 @@ public:
     AudioGain* gain() { return m_gain.get(); }                                   
     
 private:
-    AudioGainNode(AudioContext*, double sampleRate);
+    AudioGainNode(AudioContext*, float sampleRate);
 
     double m_lastGain; // for de-zippering
     RefPtr<AudioGain> m_gain;
index 44fb02cd8fba3181988b2bccdc109779d904042a..2f1ef764df8dbd22b673c0ec70bfb6f63b1ab4d2 100644 (file)
@@ -38,10 +38,10 @@ namespace WebCore {
 
 AudioListener::AudioListener()
     : m_position(0, 0, 0)
-    , m_orientation(0.0, 0.0, -1.0)
-    , m_upVector(0.0, 1.0, 0.0)
+    , m_orientation(0, 0, -1)
+    , m_upVector(0, 1, 0)
     , m_velocity(0, 0, 0)
-    , m_dopplerFactor(1.0)
+    , m_dopplerFactor(1)
     , m_speedOfSound(343.3)
 {
 }
index 5281a8907ccba0c181b517d7c6ab528046840dcf..8b5d8ad1a038b55133c7fb3a7c48c928658d5802 100644 (file)
@@ -45,12 +45,12 @@ public:
     }
 
     // Position
-    void setPosition(double x, double y, double z) { setPosition(FloatPoint3D(x, y, z)); }
+    void setPosition(float x, float y, float z) { setPosition(FloatPoint3D(x, y, z)); }
     void setPosition(const FloatPoint3D &position) { m_position = position; }
     const FloatPoint3D& position() const { return m_position; }
 
     // Orientation
-    void setOrientation(double x, double y, double z, double upX, double upY, double upZ)
+    void setOrientation(float x, float y, float z, float upX, float upY, float upZ)
     {
         setOrientation(FloatPoint3D(x, y, z));
         setUpVector(FloatPoint3D(upX, upY, upZ));
@@ -63,7 +63,7 @@ public:
     const FloatPoint3D& upVector() const { return m_upVector; }
 
     // Velocity
-    void setVelocity(double x, double y, double z) { setVelocity(FloatPoint3D(x, y, z)); }
+    void setVelocity(float x, float y, float z) { setVelocity(FloatPoint3D(x, y, z)); }
     void setVelocity(const FloatPoint3D &velocity) { m_velocity = velocity; }
     const FloatPoint3D& velocity() const { return m_velocity; }
 
index 0c17de60cbe6ac0d97f050630654e18a89353278..c2bbaf71b1ab390fd1e972f88360a68a9aed830d 100644 (file)
@@ -36,7 +36,7 @@
 
 namespace WebCore {
 
-AudioNode::AudioNode(AudioContext* context, double sampleRate)
+AudioNode::AudioNode(AudioContext* context, float sampleRate)
     : m_isInitialized(false)
     , m_nodeType(NodeTypeUnknown)
     , m_context(context)
index 00871dd59639532be6d77c34f723686056f703e7..86ffe7cfa4552615a9b958394bde7ad48edfc7c2 100644 (file)
@@ -48,7 +48,7 @@ class AudioNode {
 public:
     enum { ProcessingSizeInFrames = 128 };
 
-    AudioNode(AudioContext*, double sampleRate);
+    AudioNode(AudioContext*, float sampleRate);
     virtual ~AudioNode();
 
     AudioContext* context() { return m_context.get(); }
@@ -117,7 +117,7 @@ public:
     bool connect(AudioNode* destination, unsigned outputIndex = 0, unsigned inputIndex = 0);
     bool disconnect(unsigned outputIndex = 0);
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
 
     // processIfNecessary() is called by our output(s) when the rendering graph needs this AudioNode to process.
     // This method ensures that the AudioNode will only process once per rendering time quantum even if it's called repeatedly.
@@ -150,7 +150,7 @@ private:
     volatile bool m_isInitialized;
     NodeType m_nodeType;
     RefPtr<AudioContext> m_context;
-    double m_sampleRate;
+    float m_sampleRate;
     Vector<OwnPtr<AudioNodeInput> > m_inputs;
     Vector<OwnPtr<AudioNodeOutput> > m_outputs;
 
index 7d26feac947d76c4baf47847f81fae4705c29324..faf7f159e44321446ed64361c2df445d305a8dda 100644 (file)
@@ -46,7 +46,7 @@ static void fixNANs(double &x)
         x = 0.0;
 }
 
-AudioPannerNode::AudioPannerNode(AudioContext* context, double sampleRate)
+AudioPannerNode::AudioPannerNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
     , m_panningModel(Panner::PanningModelHRTF)
     , m_lastGain(-1.0)
@@ -188,7 +188,7 @@ void AudioPannerNode::getAzimuthElevation(double* outAzimuth, double* outElevati
 
     FloatPoint3D up = listenerRight.cross(listenerFrontNorm);
 
-    double upProjection = sourceListener.dot(up);
+    float upProjection = sourceListener.dot(up);
 
     FloatPoint3D projectedSource = sourceListener - upProjection * up;
     projectedSource.normalize();
index 61e34a97c3bde844b12ab0d50b60200e58ab6b2e..6ecebeaae31ca7dac38579f1d5a035de3722cf11 100644 (file)
@@ -53,7 +53,7 @@ public:
         SOUNDFIELD = 2,
     };
 
-    static PassRefPtr<AudioPannerNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<AudioPannerNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new AudioPannerNode(context, sampleRate));
     }
@@ -117,7 +117,7 @@ public:
     AudioGain* coneGain() { return m_coneGain.get(); }                                        
 
 private:
-    AudioPannerNode(AudioContext*, double sampleRate);
+    AudioPannerNode(AudioContext*, float sampleRate);
 
     // Returns the combined distance and cone gain attenuation.
     float distanceConeGain();
index 90564984df2d3cf3c8f2e775466463e7e90380f9..be9279fdf180ad04a624bd2501499e52374893f8 100644 (file)
@@ -31,6 +31,7 @@
 
 #include "AudioNode.h"
 #include "AudioUtilities.h"
+#include "FloatConversion.h"
 #include <wtf/MathExtras.h>
 
 namespace WebCore {
@@ -43,13 +44,13 @@ float AudioParam::value()
     // Update value for timeline.
     if (context() && context()->isAudioThread()) {
         bool hasValue;
-        float timelineValue = m_timeline.valueForContextTime(context(), m_value, hasValue);
+        float timelineValue = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), hasValue);
 
         if (hasValue)
             m_value = timelineValue;
     }
 
-    return static_cast<float>(m_value);
+    return narrowPrecisionToFloat(m_value);
 }
 
 void AudioParam::setValue(float value)
@@ -62,7 +63,7 @@ void AudioParam::setValue(float value)
 
 float AudioParam::smoothedValue()
 {
-    return static_cast<float>(m_smoothedValue);
+    return narrowPrecisionToFloat(m_smoothedValue);
 }
 
 bool AudioParam::smooth()
@@ -71,7 +72,7 @@ bool AudioParam::smooth()
     // Smoothing effectively is performed by the timeline.
     bool useTimelineValue = false;
     if (context())
-        m_value = m_timeline.valueForContextTime(context(), m_value, useTimelineValue);
+        m_value = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), useTimelineValue);
     
     if (m_smoothedValue == m_value) {
         // Smoothed value has already approached and snapped to value.
@@ -102,12 +103,12 @@ void AudioParam::calculateSampleAccurateValues(float* values, unsigned numberOfV
     // Calculate values for this render quantum.
     // Normally numberOfValues will equal AudioNode::ProcessingSizeInFrames (the render quantum size).
     float sampleRate = context()->sampleRate();
-    float startTime = context()->currentTime();
+    float startTime = narrowPrecisionToFloat(context()->currentTime());
     float endTime = startTime + numberOfValues / sampleRate;
 
     // Note we're running control rate at the sample-rate.
     // Pass in the current value as default value.
-    m_value = m_timeline.valuesForTimeRange(startTime, endTime, m_value, values, numberOfValues, sampleRate, sampleRate);
+    m_value = m_timeline.valuesForTimeRange(startTime, endTime, narrowPrecisionToFloat(m_value), values, numberOfValues, sampleRate, sampleRate);
 }
 
 } // namespace WebCore
index 0451884456fae19185708aa2f611beb3d3404643..2ee134ae37a22102896d96e8eac30a3a483f3bcf 100644 (file)
@@ -30,6 +30,7 @@
 #include "AudioParamTimeline.h"
 
 #include "AudioUtilities.h"
+#include "FloatConversion.h"
 #include <algorithm>
 #include <wtf/MathExtras.h>
 
@@ -124,8 +125,8 @@ float AudioParamTimeline::valueForContextTime(AudioContext* context, float defau
     // Ask for just a single value.
     float value;
     float sampleRate = context->sampleRate();
-    float startTime = context->currentTime();
-    float endTime = startTime + 1.1 / sampleRate; // time just beyond one sample-frame
+    float startTime = narrowPrecisionToFloat(context->currentTime());
+    float endTime = startTime + 1.1f / sampleRate; // time just beyond one sample-frame
     float controlRate = sampleRate / AudioNode::ProcessingSizeInFrames; // one parameter change per render quantum
     value = valuesForTimeRange(startTime, endTime, defaultValue, &value, 1, sampleRate, controlRate);
 
@@ -157,7 +158,7 @@ float AudioParamTimeline::valuesForTimeRange(float startTime,
 }
 
 // Returns the rounded down integer sample-frame for the time and sample-rate.
-static unsigned timeToSampleFrame(double time, double sampleRate)
+static unsigned timeToSampleFrame(double time, float sampleRate)
 {
     double k = 0.5 / sampleRate;
     return static_cast<unsigned>((time + k) * sampleRate);
@@ -184,7 +185,7 @@ float AudioParamTimeline::valuesForTimeRangeImpl(float startTime,
     }
 
     // Maintain a running time and index for writing the values buffer.
-    double currentTime = startTime;
+    float currentTime = startTime;
     unsigned writeIndex = 0;
 
     // If first event is after startTime then fill initial part of values buffer with defaultValue
@@ -245,8 +246,8 @@ float AudioParamTimeline::valuesForTimeRangeImpl(float startTime,
                     values[writeIndex] = value;
             } else {
                 // Interpolate in log space.
-                value1 = log2(value1);
-                value2 = log2(value2);
+                value1 = log2f(value1);
+                value2 = log2f(value2);
 
                 // FIXME: optimize to not use pow() in inner loop, this is just a simple exponential ramp.
                 for (; writeIndex < fillToFrame; ++writeIndex) {
index 60913716ba0fdc88ceada2a7355597bb34ea6e60..a6bdd427b99223e787cca3fc0ee277f1616d3884 100644 (file)
@@ -35,7 +35,7 @@ namespace WebCore {
 
 class AudioSourceNode : public AudioNode {
 public:
-    AudioSourceNode(AudioContext* context, double sampleRate)
+    AudioSourceNode(AudioContext* context, float sampleRate)
         : AudioNode(context, sampleRate)
     {
     }
index ced07b8bad501469560f80d7b5179a6b0d3ab824..f16d8e5f96d757bf95d57242b6e2db95ce61cf0a 100644 (file)
@@ -32,7 +32,7 @@
 
 namespace WebCore {
 
-BiquadFilterNode::BiquadFilterNode(AudioContext* context, double sampleRate)
+BiquadFilterNode::BiquadFilterNode(AudioContext* context, float sampleRate)
     : AudioBasicProcessorNode(context, sampleRate)
 {
     // Initially setup as lowpass filter.
index dfe17b3ad6a2912875a3579e66e4cbbd143c3771..683b2a39d474f83eecd6b5652c421ee680b57793 100644 (file)
@@ -46,7 +46,7 @@ public:
         ALLPASS = 7
     };
 
-    static PassRefPtr<BiquadFilterNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<BiquadFilterNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new BiquadFilterNode(context, sampleRate));      
     }
@@ -59,7 +59,7 @@ public:
     AudioParam* gain() { return biquadProcessor()->parameter3(); }
     
 private:
-    BiquadFilterNode(AudioContext*, double sampleRate);
+    BiquadFilterNode(AudioContext*, float sampleRate);
 
     BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); }
 };
index 540e1fa1095d76fc89fc519d5aa55944b9b9a8ad..c755277485b679df3e89f3af0e315d8ef6392a69 100644 (file)
@@ -32,7 +32,7 @@
 
 namespace WebCore {
     
-BiquadProcessor::BiquadProcessor(double sampleRate, size_t numberOfChannels, bool autoInitialize)
+BiquadProcessor::BiquadProcessor(float sampleRate, size_t numberOfChannels, bool autoInitialize)
     : AudioDSPKernelProcessor(sampleRate, numberOfChannels)
     , m_type(LowPass)
     , m_parameter1(0)
@@ -51,7 +51,7 @@ BiquadProcessor::BiquadProcessor(double sampleRate, size_t numberOfChannels, boo
         initialize();
 }
 
-BiquadProcessor::BiquadProcessor(FilterType type, double sampleRate, size_t numberOfChannels, bool autoInitialize)
+BiquadProcessor::BiquadProcessor(FilterType type, float sampleRate, size_t numberOfChannels, bool autoInitialize)
     : AudioDSPKernelProcessor(sampleRate, numberOfChannels)
     , m_type(type)
     , m_parameter1(0)
index 3c521852d031786a2c56df69d5066af385ba35a8..e7487262ada10eab548b59a34a5b8102353175f5 100644 (file)
@@ -49,10 +49,10 @@ public:
         Allpass = 7
     };
 
-    BiquadProcessor(double sampleRate, size_t numberOfChannels, bool autoInitialize);
+    BiquadProcessor(float sampleRate, size_t numberOfChannels, bool autoInitialize);
 
     // Old constructor used by deprecated LowPass2FilterNode and HighPass2FilterNode
-    BiquadProcessor(FilterType, double sampleRate, size_t numberOfChannels, bool autoInitialize = true);
+    BiquadProcessor(FilterType, float sampleRate, size_t numberOfChannels, bool autoInitialize = true);
 
     virtual ~BiquadProcessor();
     
index 28d57db31042b789d3af4bd41b78af739833828c..31583561971a699bb30c5a28c1afc2aacb1d63f4 100644 (file)
@@ -45,7 +45,7 @@ const size_t MaxFFTSize = 32768;
 
 namespace WebCore {
 
-ConvolverNode::ConvolverNode(AudioContext* context, double sampleRate)
+ConvolverNode::ConvolverNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     addInput(adoptPtr(new AudioNodeInput(this)));
index 7b71ba96dca5e07509ffa6197fa9e3de616df914..fb29d5c5d287b90be4506675f355bfbbb7843d9e 100644 (file)
@@ -37,7 +37,7 @@ class Reverb;
     
 class ConvolverNode : public AudioNode {
 public:
-    static PassRefPtr<ConvolverNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<ConvolverNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new ConvolverNode(context, sampleRate));      
     }
@@ -55,7 +55,7 @@ public:
     AudioBuffer* buffer();
 
 private:
-    ConvolverNode(AudioContext*, double sampleRate);
+    ConvolverNode(AudioContext*, float sampleRate);
 
     OwnPtr<Reverb> m_reverb;
     RefPtr<AudioBuffer> m_buffer;
index f1a726333893a35609ad9f7442d19279cfc8c31f..f7db34e058cabe161603caba009fd1f19d49811f 100644 (file)
@@ -50,7 +50,7 @@ void DefaultAudioDestinationNode::initialize()
     if (isInitialized())
         return;
 
-    double hardwareSampleRate = AudioDestination::hardwareSampleRate();
+    float hardwareSampleRate = AudioDestination::hardwareSampleRate();
 #ifndef NDEBUG    
     fprintf(stderr, ">>>> hardwareSampleRate = %f\n", hardwareSampleRate);
 #endif
index f1d689db09c93598bb34635872d1731686d48c12..630bdc3352d75a734286977dfb9a2be44f1e7a27 100644 (file)
@@ -46,7 +46,7 @@ public:
     virtual void initialize();
     virtual void uninitialize();
     
-    double sampleRate() const { return m_destination->sampleRate(); }
+    float sampleRate() const { return m_destination->sampleRate(); }
 
     virtual void startRendering();
     
index 8e0b40efcbb7d1e0f39f6603266bcc22424dd6b2..aeda189c5a9557102317b480f20ab986ce2f4f83 100644 (file)
@@ -33,8 +33,8 @@
 
 using namespace std;
   
-const double DefaultMaxDelayTime = 1.0;
-const double SmoothingTimeConstant = 0.020; // 20ms
+const float DefaultMaxDelayTime = 1;
+const float SmoothingTimeConstant = 0.020f; // 20ms
   
 namespace WebCore {
 
@@ -54,7 +54,7 @@ DelayDSPKernel::DelayDSPKernel(DelayProcessor* processor)
     m_smoothingRate = AudioUtilities::discreteTimeConstantForSampleRate(SmoothingTimeConstant, processor->sampleRate());
 }
 
-DelayDSPKernel::DelayDSPKernel(double maxDelayTime, double sampleRate)
+DelayDSPKernel::DelayDSPKernel(double maxDelayTime, float sampleRate)
     : AudioDSPKernel(sampleRate)
     , m_maxDelayTime(maxDelayTime)
     , m_writeIndex(0)
@@ -88,7 +88,7 @@ void DelayDSPKernel::process(const float* source, float* destination, size_t fra
     if (!source || !destination)
         return;
         
-    double sampleRate = this->sampleRate();
+    float sampleRate = this->sampleRate();
     double delayTime = delayProcessor() ? delayProcessor()->delayTime()->value() : m_desiredDelayFrames / sampleRate;
 
     // Make sure the delay time is in a valid range.
index 2ae36cb8647e06006ce5254db45b1d7c30e1e826..79a39568b788f171f12f67f0bfad73afd16e88b4 100644 (file)
@@ -36,7 +36,7 @@ class DelayProcessor;
 class DelayDSPKernel : public AudioDSPKernel {
 public:  
     DelayDSPKernel(DelayProcessor*);
-    DelayDSPKernel(double maxDelayTime, double sampleRate);
+    DelayDSPKernel(double maxDelayTime, float sampleRate);
     
     virtual void process(const float* source, float* destination, size_t framesToProcess);
     virtual void reset();
index b578d60480cec1863a62cf67f27cbee8dc7f5102..2a525dd1a6c18b9dd5c44a7e344b70e9b149f901 100644 (file)
@@ -30,7 +30,7 @@
 
 namespace WebCore {
 
-DelayNode::DelayNode(AudioContext* context, double sampleRate)
+DelayNode::DelayNode(AudioContext* context, float sampleRate)
     : AudioBasicProcessorNode(context, sampleRate)
 {
     m_processor = adoptPtr(new DelayProcessor(sampleRate, 1));    
index 93ad2279d3165d61f7cc030db3fe9ed37250e09e..3d58a89137a36a3cd5df8ce9ae8552b3728e26f6 100644 (file)
@@ -35,7 +35,7 @@ class AudioParam;
 
 class DelayNode : public AudioBasicProcessorNode {
 public:
-    static PassRefPtr<DelayNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<DelayNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new DelayNode(context, sampleRate));      
     }
@@ -43,7 +43,7 @@ public:
     AudioParam* delayTime();
 
 private:
-    DelayNode(AudioContext*, double sampleRate);
+    DelayNode(AudioContext*, float sampleRate);
 
     DelayProcessor* delayProcessor() { return static_cast<DelayProcessor*>(processor()); }
 };
index 5fdc8dfc2a1a562f59645ed79c8fc5fafea2cde4..8ed3e4337e09f13017fc6f3f7ef2fbdc8f2326ca 100644 (file)
@@ -32,7 +32,7 @@
 
 namespace WebCore {
 
-DelayProcessor::DelayProcessor(double sampleRate, unsigned numberOfChannels)
+DelayProcessor::DelayProcessor(float sampleRate, unsigned numberOfChannels)
     : AudioDSPKernelProcessor(sampleRate, numberOfChannels)
 {
     m_delayTime = AudioParam::create("delayTime", 0.0, 0.0, 1.0);
index 4844c4b2681af829878de9e9f31e8ab1a2ef84dd..15428ce759685fbf28b85a060d0cea0b3b3334e0 100644 (file)
@@ -37,7 +37,7 @@ class AudioDSPKernel;
     
 class DelayProcessor : public AudioDSPKernelProcessor {
 public:
-    DelayProcessor(double sampleRate, unsigned numberOfChannels);
+    DelayProcessor(float sampleRate, unsigned numberOfChannels);
     virtual ~DelayProcessor();
     
     virtual PassOwnPtr<AudioDSPKernel> createKernel();
index 6feb2960918689a68b703f0852327f4b145b6d2f..ea8b58922356aa1365132aef31faaf603f745427 100644 (file)
@@ -35,7 +35,7 @@
 
 namespace WebCore {
 
-DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* context, double sampleRate)
+DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     addInput(adoptPtr(new AudioNodeInput(this)));
index 7ae75ead02b495d76c466aada964b00db6db4ba0..d718ab341cc01e188cf2130567465f048bda05cb 100644 (file)
@@ -34,7 +34,7 @@ class DynamicsCompressor;
     
 class DynamicsCompressorNode : public AudioNode {
 public:
-    static PassRefPtr<DynamicsCompressorNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<DynamicsCompressorNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new DynamicsCompressorNode(context, sampleRate));      
     }
@@ -48,7 +48,7 @@ public:
     virtual void uninitialize();
 
 private:
-    DynamicsCompressorNode(AudioContext*, double sampleRate);
+    DynamicsCompressorNode(AudioContext*, float sampleRate);
 
     OwnPtr<DynamicsCompressor> m_dynamicsCompressor;
 };
index 27946dd99fa3409fcbb5a6df66088fdeba536638..520785f8068fd525ec2bb5a22a7b8f008d1ae07b 100644 (file)
@@ -30,7 +30,7 @@
 
 namespace WebCore {
 
-HighPass2FilterNode::HighPass2FilterNode(AudioContext* context, double sampleRate)
+HighPass2FilterNode::HighPass2FilterNode(AudioContext* context, float sampleRate)
     : AudioBasicProcessorNode(context, sampleRate)
 {
     m_processor = adoptPtr(new BiquadProcessor(BiquadProcessor::HighPass, sampleRate, 1, false));
index be0beb6c236d2c2fdac5bd0ab3aa30949bb7fe1a..bfd76a0d584c1a885eb9d350992998ffee3c5a63 100644 (file)
@@ -34,7 +34,7 @@ class AudioParam;
     
 class HighPass2FilterNode : public AudioBasicProcessorNode {
 public:
-    static PassRefPtr<HighPass2FilterNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<HighPass2FilterNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new HighPass2FilterNode(context, sampleRate));      
     }
@@ -43,7 +43,7 @@ public:
     AudioParam* resonance() { return biquadProcessor()->parameter2(); }
     
 private:
-    HighPass2FilterNode(AudioContext*, double sampleRate);
+    HighPass2FilterNode(AudioContext*, float sampleRate);
 
     BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); }
 };
index 487299b4602d7bc6ae1ddc62e4b3525f133a65e7..05e21b7a310a158a18448a955a0328a371562766 100644 (file)
@@ -42,12 +42,12 @@ namespace WebCore {
 
 const size_t DefaultBufferSize = 4096;
 
-PassRefPtr<JavaScriptAudioNode> JavaScriptAudioNode::create(AudioContext* context, double sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
+PassRefPtr<JavaScriptAudioNode> JavaScriptAudioNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
 {
     return adoptRef(new JavaScriptAudioNode(context, sampleRate, bufferSize, numberOfInputs, numberOfOutputs));
 }
 
-JavaScriptAudioNode::JavaScriptAudioNode(AudioContext* context, double sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
+JavaScriptAudioNode::JavaScriptAudioNode(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
     : AudioNode(context, sampleRate)
     , m_doubleBufferIndex(0)
     , m_doubleBufferIndexForEvent(0)
@@ -96,7 +96,7 @@ void JavaScriptAudioNode::initialize()
     if (isInitialized())
         return;
 
-    double sampleRate = context()->sampleRate();
+    float sampleRate = context()->sampleRate();
 
     // Create double buffers on both the input and output sides.
     // These AudioBuffers will be directly accessed in the main thread by JavaScript.
index e99a25d00648c6273e6dc2aaa5dfe36976bde731..5a299c490cff35757e541d7fb605c1db658425dc 100644 (file)
@@ -52,7 +52,7 @@ public:
     // This value controls how frequently the onaudioprocess event handler is called and how many sample-frames need to be processed each call.
     // Lower numbers for bufferSize will result in a lower (better) latency. Higher numbers will be necessary to avoid audio breakup and glitches.
     // The value chosen must carefully balance between latency and audio quality.
-    static PassRefPtr<JavaScriptAudioNode> create(AudioContext*, double sampleRate, size_t bufferSize, unsigned numberOfInputs = 1, unsigned numberOfOutputs = 1);
+    static PassRefPtr<JavaScriptAudioNode> create(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputs = 1, unsigned numberOfOutputs = 1);
 
     virtual ~JavaScriptAudioNode();
 
@@ -77,7 +77,7 @@ public:
     using AudioNode::deref;
     
 private:
-    JavaScriptAudioNode(AudioContext*, double sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs);
+    JavaScriptAudioNode(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs);
 
     static void fireProcessEventDispatch(void* userData);
     void fireProcessEvent();
index ac2de1ff24e603bb9f86e3cbec5502897d756f9b..e2d669acd19ee294d404f68776034b03b99c2524 100644 (file)
@@ -30,7 +30,7 @@
 
 namespace WebCore {
 
-LowPass2FilterNode::LowPass2FilterNode(AudioContext* context, double sampleRate)
+LowPass2FilterNode::LowPass2FilterNode(AudioContext* context, float sampleRate)
     : AudioBasicProcessorNode(context, sampleRate)
 {
     m_processor = adoptPtr(new BiquadProcessor(BiquadProcessor::LowPass, sampleRate, 1, false));
index 43d70513d27dc30e941847c19b3f0d28002d6523..3342c6f96433420f43c6e1a8869073f80a62de72 100644 (file)
@@ -34,7 +34,7 @@ class AudioParam;
     
 class LowPass2FilterNode : public AudioBasicProcessorNode {
 public:
-    static PassRefPtr<LowPass2FilterNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<LowPass2FilterNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new LowPass2FilterNode(context, sampleRate));      
     }
@@ -43,7 +43,7 @@ public:
     AudioParam* resonance() { return biquadProcessor()->parameter2(); }
     
 private:
-    LowPass2FilterNode(AudioContext*, double sampleRate);
+    LowPass2FilterNode(AudioContext*, float sampleRate);
 
     BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); }
 };
index 714c120519e19289d640b3bbccbad85057fe5557..c4d567a5d29dc5a9084a5507c27739eae5c5ec50 100644 (file)
@@ -49,7 +49,7 @@ public:
     virtual void initialize();
     virtual void uninitialize();
     
-    double sampleRate() const { return m_renderTarget->sampleRate(); }
+    float sampleRate() const { return m_renderTarget->sampleRate(); }
 
     void startRendering();
     
index 449c84afef971a05cb2372ccf31c7c3c1a4221f0..8d74a67a80a72767d78a06edd86fa727b63dd7ef 100644 (file)
@@ -33,7 +33,7 @@
 
 namespace WebCore {
 
-RealtimeAnalyserNode::RealtimeAnalyserNode(AudioContext* context, double sampleRate)
+RealtimeAnalyserNode::RealtimeAnalyserNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     addInput(adoptPtr(new AudioNodeInput(this)));
index d00a5b6f00a6ba2311fca5dbc8aa959a8b76d0a2..6c7add959248bd7169e00be32ca7eb283f1ad990 100644 (file)
@@ -32,7 +32,7 @@ namespace WebCore {
 
 class RealtimeAnalyserNode : public AudioNode {
 public:
-    static PassRefPtr<RealtimeAnalyserNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<RealtimeAnalyserNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new RealtimeAnalyserNode(context, sampleRate));      
     }
@@ -64,7 +64,7 @@ public:
     void getByteTimeDomainData(Uint8Array* array) { m_analyser.getByteTimeDomainData(array); }
 
 private:
-    RealtimeAnalyserNode(AudioContext*, double sampleRate);
+    RealtimeAnalyserNode(AudioContext*, float sampleRate);
 
     RealtimeAnalyser m_analyser;
 };
index 416b72234eaa54dc77daab672e2e82bcb3663b89..5f3cf3476230ec042b9153c1e338c5757a1ba83e 100644 (file)
@@ -61,7 +61,7 @@ void WaveShaperDSPKernel::process(const float* source, float* destination, size_
         const float input = source[i];
 
         // Calculate an index based on input -1 -> +1 with 0 being at the center of the curve data.
-        int index = curveLength * 0.5 * (input + 1);
+        int index = (curveLength * (input + 1)) / 2;
 
         // Clip index to the input range of the curve.
         // This takes care of input outside of nominal range -1 -> +1
index abd522ca79ddc84b3a4964439e9a1406e704bcea..f7571debb712c954d2af9a48f6641a8237660d6c 100644 (file)
@@ -32,7 +32,7 @@
 
 namespace WebCore {
     
-WaveShaperProcessor::WaveShaperProcessor(double sampleRate, size_t numberOfChannels)
+WaveShaperProcessor::WaveShaperProcessor(float sampleRate, size_t numberOfChannels)
     : AudioDSPKernelProcessor(sampleRate, numberOfChannels)
 {
 }
index 86a188d33c595abe435ba20d2066c86b82bf4bd3..eb672606e413a62a4e251d6f450435fc4c6d1347 100644 (file)
@@ -38,7 +38,7 @@ namespace WebCore {
 
 class WaveShaperProcessor : public AudioDSPKernelProcessor {
 public:
-    WaveShaperProcessor(double sampleRate, size_t numberOfChannels);
+    WaveShaperProcessor(float sampleRate, size_t numberOfChannels);
 
     virtual ~WaveShaperProcessor();
 
index 0a66a8edf845fc7524788ea0b0563b9a1a94da6a..e4e83b2ca5c9a41cb06fe510da0c074b490f0315 100644 (file)
@@ -1,3 +1,20 @@
+2011-10-05  Jer Noble  <jer.noble@apple.com>
+
+        WEB_AUDIO does not compile on Leopard 32-bit.
+        https://bugs.webkit.org/show_bug.cgi?id=69292
+
+        Reviewed by Simon Fraser.
+
+        Platform-independent portions of WEB_AUDIO have changed from double -> float, and 
+        platform-specific subclasses must change as well.
+
+        * src/AudioDestinationChromium.cpp:
+        (WebCore::AudioDestination::create):
+        (WebCore::AudioDestinationChromium::AudioDestinationChromium):
+        (WebCore::AudioDestination::hardwareSampleRate):
+        * src/AudioDestinationChromium.h:
+        (WebCore::AudioDestinationChromium::sampleRate):
+
 2011-10-05  James Robinson  <jamesr@chromium.org>
 
         [chromium] Hook WebCompositor interface for input events up to the compositor proper
index 2be1ff5de2298e7a76bd37dfb60afc4962d530a7..9499a1ce7c3440a7d01c840b2c17825a75b11a0e 100644 (file)
@@ -50,12 +50,12 @@ const size_t maximumCallbackBufferSize = 16384;
 const unsigned numberOfChannels = 2;
 
 // Factory method: Chromium-implementation
-PassOwnPtr<AudioDestination> AudioDestination::create(AudioSourceProvider& provider, double sampleRate)
+PassOwnPtr<AudioDestination> AudioDestination::create(AudioSourceProvider& provider, float sampleRate)
 {
     return adoptPtr(new AudioDestinationChromium(provider, sampleRate));
 }
 
-AudioDestinationChromium::AudioDestinationChromium(AudioSourceProvider& provider, double sampleRate)
+AudioDestinationChromium::AudioDestinationChromium(AudioSourceProvider& provider, float sampleRate)
     : m_provider(provider)
     , m_renderBus(numberOfChannels, renderBufferSize, false)
     , m_sampleRate(sampleRate)
@@ -101,9 +101,9 @@ void AudioDestinationChromium::stop()
     }
 }
 
-double AudioDestination::hardwareSampleRate()
+float AudioDestination::hardwareSampleRate()
 {
-    return webKitPlatformSupport()->audioHardwareSampleRate();
+    return static_cast<float>(webKitPlatformSupport()->audioHardwareSampleRate());
 }
 
 // Pulls on our provider to get the rendered audio stream.
index ecfc4b07d4b4ec2418daf14284106f7b6f26fd76..13a3f52fd83f7d72bc0f0a4bcffd3b03a0da17ad 100644 (file)
@@ -42,14 +42,14 @@ namespace WebCore {
 
 class AudioDestinationChromium : public AudioDestination, public WebKit::WebAudioDevice::RenderCallback {
 public:
-    AudioDestinationChromium(AudioSourceProvider&, double sampleRate);
+    AudioDestinationChromium(AudioSourceProvider&, float sampleRate);
     virtual ~AudioDestinationChromium();
 
     virtual void start();
     virtual void stop();
     bool isPlaying() { return m_isPlaying; }
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
 
     // WebKit::WebAudioDevice::RenderCallback
     virtual void render(const WebKit::WebVector<float*>& audioData, size_t numberOfFrames);
@@ -57,7 +57,7 @@ public:
 private:
     AudioSourceProvider& m_provider;
     AudioBus m_renderBus;
-    double m_sampleRate;
+    float m_sampleRate;
     bool m_isPlaying;
     OwnPtr<WebKit::WebAudioDevice> m_audioDevice;
     size_t m_callbackBufferSize;