WEB_AUDIO does not compile on Leopard 32-bit.
authorjer.noble@apple.com <jer.noble@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 5 Oct 2011 20:53:43 +0000 (20:53 +0000)
committerjer.noble@apple.com <jer.noble@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 5 Oct 2011 20:53:43 +0000 (20:53 +0000)
https://bugs.webkit.org/show_bug.cgi?id=69292

Reviewed by Simon Fraser.

Source/WebCore:

No new tests; covered by all existing audio tests.

Use of float and double within the WEB_AUDIO implementation has been harmonized, with most
calculations done using floats, with narrowPrecisionToFloat() added when necessary to
narrow double results down to floats, and with float constants initialized with float values:
* platform/audio/AudioBus.cpp:
(WebCore::AudioBus::AudioBus):
(WebCore::AudioBus::createByMixingToMono):
* platform/audio/AudioBus.h:
(WebCore::AudioBus::sampleRate):
(WebCore::AudioBus::setSampleRate):
* platform/audio/AudioDSPKernel.h:
(WebCore::AudioDSPKernel::AudioDSPKernel):
(WebCore::AudioDSPKernel::sampleRate):
* platform/audio/AudioDSPKernelProcessor.cpp:
(WebCore::AudioDSPKernelProcessor::AudioDSPKernelProcessor):
* platform/audio/AudioDSPKernelProcessor.h:
* platform/audio/AudioDestination.h:
* platform/audio/AudioFileReader.h:
* platform/audio/AudioProcessor.h:
(WebCore::AudioProcessor::AudioProcessor):
(WebCore::AudioProcessor::sampleRate):
* platform/audio/AudioUtilities.cpp:
(WebCore::AudioUtilities::decibelsToLinear):
(WebCore::AudioUtilities::linearToDecibels):
(WebCore::AudioUtilities::discreteTimeConstantForSampleRate):
* platform/audio/AudioUtilities.h:
* platform/audio/DynamicsCompressor.cpp:
(WebCore::DynamicsCompressor::DynamicsCompressor):
(WebCore::DynamicsCompressor::initializeParameters):
(WebCore::DynamicsCompressor::parameterValue):
(WebCore::DynamicsCompressor::setEmphasisStageParameters):
(WebCore::DynamicsCompressor::process):
* platform/audio/DynamicsCompressor.h:
(WebCore::DynamicsCompressor::sampleRate):
(WebCore::DynamicsCompressor::nyquist):
* platform/audio/DynamicsCompressorKernel.cpp:
(WebCore::saturate):
(WebCore::DynamicsCompressorKernel::DynamicsCompressorKernel):
(WebCore::DynamicsCompressorKernel::process):
* platform/audio/DynamicsCompressorKernel.h:
* platform/audio/EqualPowerPanner.cpp:
(WebCore::EqualPowerPanner::EqualPowerPanner):
* platform/audio/EqualPowerPanner.h:
* platform/audio/HRTFDatabase.cpp:
(WebCore::HRTFDatabase::create):
(WebCore::HRTFDatabase::HRTFDatabase):
* platform/audio/HRTFDatabase.h:
(WebCore::HRTFDatabase::sampleRate):
* platform/audio/HRTFDatabaseLoader.cpp:
(WebCore::HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary):
(WebCore::HRTFDatabaseLoader::HRTFDatabaseLoader):
* platform/audio/HRTFDatabaseLoader.h:
(WebCore::HRTFDatabaseLoader::databaseSampleRate):
* platform/audio/HRTFElevation.cpp:
(WebCore::HRTFElevation::calculateSymmetricKernelsForAzimuthElevation):
(WebCore::HRTFElevation::calculateKernelsForAzimuthElevation):
(WebCore::HRTFElevation::createForSubject):
(WebCore::HRTFElevation::createByInterpolatingSlices):
* platform/audio/HRTFElevation.h:
(WebCore::HRTFElevation::sampleRate):
(WebCore::HRTFElevation::HRTFElevation):
* platform/audio/HRTFKernel.cpp:
(WebCore::extractAverageGroupDelay):
(WebCore::HRTFKernel::HRTFKernel):
(WebCore::HRTFKernel::createInterpolatedKernel):
* platform/audio/HRTFKernel.h:
(WebCore::HRTFKernel::create):
(WebCore::HRTFKernel::frameDelay):
(WebCore::HRTFKernel::sampleRate):
(WebCore::HRTFKernel::HRTFKernel):
* platform/audio/HRTFPanner.cpp:
(WebCore::HRTFPanner::HRTFPanner):
(WebCore::HRTFPanner::fftSizeForSampleRate):
* platform/audio/HRTFPanner.h:
(WebCore::HRTFPanner::sampleRate):
* platform/audio/Panner.cpp:
(WebCore::Panner::create):
* platform/audio/Panner.h:
* platform/audio/chromium/AudioBusChromium.cpp:
(WebCore::AudioBus::loadPlatformResource):
* platform/audio/mac/AudioBusMac.mm:
(WebCore::AudioBus::loadPlatformResource):
* platform/audio/mac/AudioDestinationMac.cpp:
(WebCore::AudioDestination::create):
(WebCore::AudioDestination::hardwareSampleRate):
(WebCore::AudioDestinationMac::AudioDestinationMac):
* platform/audio/mac/AudioDestinationMac.h:
(WebCore::AudioDestinationMac::sampleRate):
* platform/audio/mac/AudioFileReaderMac.cpp:
(WebCore::AudioFileReader::createBus):
(WebCore::createBusFromAudioFile):
(WebCore::createBusFromInMemoryAudioFile):
* platform/audio/mac/AudioFileReaderMac.h:
* webaudio/AsyncAudioDecoder.cpp:
(WebCore::AsyncAudioDecoder::decodeAsync):
(WebCore::AsyncAudioDecoder::DecodingTask::create):
(WebCore::AsyncAudioDecoder::DecodingTask::DecodingTask):
* webaudio/AsyncAudioDecoder.h:
(WebCore::AsyncAudioDecoder::DecodingTask::sampleRate):
* webaudio/AudioBasicProcessorNode.cpp:
(WebCore::AudioBasicProcessorNode::AudioBasicProcessorNode):
* webaudio/AudioBasicProcessorNode.h:
* webaudio/AudioBuffer.cpp:
(WebCore::AudioBuffer::create):
(WebCore::AudioBuffer::createFromAudioFileData):
(WebCore::AudioBuffer::AudioBuffer):
* webaudio/AudioBuffer.h:
(WebCore::AudioBuffer::sampleRate):
* webaudio/AudioBufferSourceNode.cpp:
(WebCore::AudioBufferSourceNode::create):
(WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
(WebCore::AudioBufferSourceNode::process):
(WebCore::AudioBufferSourceNode::renderFromBuffer):
* webaudio/AudioBufferSourceNode.h:
* webaudio/AudioChannelMerger.cpp:
(WebCore::AudioChannelMerger::AudioChannelMerger):
* webaudio/AudioChannelMerger.h:
(WebCore::AudioChannelMerger::create):
* webaudio/AudioChannelSplitter.cpp:
(WebCore::AudioChannelSplitter::AudioChannelSplitter):
* webaudio/AudioChannelSplitter.h:
(WebCore::AudioChannelSplitter::create):
* webaudio/AudioContext.cpp:
(WebCore::AudioContext::createOfflineContext):
(WebCore::AudioContext::AudioContext):
(WebCore::AudioContext::createBuffer):
* webaudio/AudioContext.h:
(WebCore::AudioContext::sampleRate):
* webaudio/AudioDestinationNode.cpp:
(WebCore::AudioDestinationNode::AudioDestinationNode):
* webaudio/AudioDestinationNode.h:
* webaudio/AudioGainNode.cpp:
(WebCore::AudioGainNode::AudioGainNode):
* webaudio/AudioGainNode.h:
(WebCore::AudioGainNode::create):
* webaudio/AudioListener.cpp:
(WebCore::AudioListener::AudioListener):
* webaudio/AudioListener.h:
(WebCore::AudioListener::setPosition):
(WebCore::AudioListener::setOrientation):
(WebCore::AudioListener::setVelocity):
* webaudio/AudioNode.cpp:
(WebCore::AudioNode::AudioNode):
* webaudio/AudioNode.h:
(WebCore::AudioNode::sampleRate):
* webaudio/AudioPannerNode.cpp:
(WebCore::AudioPannerNode::AudioPannerNode):
(WebCore::AudioPannerNode::getAzimuthElevation):
* webaudio/AudioPannerNode.h:
(WebCore::AudioPannerNode::create):
* webaudio/AudioParam.cpp:
(WebCore::AudioParam::value):
(WebCore::AudioParam::smoothedValue):
(WebCore::AudioParam::smooth):
(WebCore::AudioParam::calculateSampleAccurateValues):
* webaudio/AudioParamTimeline.cpp:
(WebCore::AudioParamTimeline::valueForContextTime):
(WebCore::timeToSampleFrame):
(WebCore::AudioParamTimeline::valuesForTimeRangeImpl):
* webaudio/AudioSourceNode.h:
(WebCore::AudioSourceNode::AudioSourceNode):
* webaudio/BiquadFilterNode.cpp:
(WebCore::BiquadFilterNode::BiquadFilterNode):
* webaudio/BiquadFilterNode.h:
(WebCore::BiquadFilterNode::create):
* webaudio/BiquadProcessor.cpp:
(WebCore::BiquadProcessor::BiquadProcessor):
* webaudio/BiquadProcessor.h:
* webaudio/ConvolverNode.cpp:
(WebCore::ConvolverNode::ConvolverNode):
* webaudio/ConvolverNode.h:
(WebCore::ConvolverNode::create):
* webaudio/DefaultAudioDestinationNode.cpp:
(WebCore::DefaultAudioDestinationNode::initialize):
* webaudio/DefaultAudioDestinationNode.h:
(WebCore::DefaultAudioDestinationNode::sampleRate):
* webaudio/DelayDSPKernel.cpp:
(WebCore::DelayDSPKernel::DelayDSPKernel):
(WebCore::DelayDSPKernel::process):
* webaudio/DelayDSPKernel.h:
* webaudio/DelayNode.cpp:
(WebCore::DelayNode::DelayNode):
* webaudio/DelayNode.h:
(WebCore::DelayNode::create):
* webaudio/DelayProcessor.cpp:
(WebCore::DelayProcessor::DelayProcessor):
* webaudio/DelayProcessor.h:
* webaudio/DynamicsCompressorNode.cpp:
(WebCore::DynamicsCompressorNode::DynamicsCompressorNode):
* webaudio/DynamicsCompressorNode.h:
(WebCore::DynamicsCompressorNode::create):
* webaudio/HighPass2FilterNode.cpp:
(WebCore::HighPass2FilterNode::HighPass2FilterNode):
* webaudio/HighPass2FilterNode.h:
(WebCore::HighPass2FilterNode::create):
* webaudio/JavaScriptAudioNode.cpp:
(WebCore::JavaScriptAudioNode::create):
(WebCore::JavaScriptAudioNode::JavaScriptAudioNode):
(WebCore::JavaScriptAudioNode::initialize):
* webaudio/JavaScriptAudioNode.h:
* webaudio/LowPass2FilterNode.cpp:
(WebCore::LowPass2FilterNode::LowPass2FilterNode):
* webaudio/LowPass2FilterNode.h:
(WebCore::LowPass2FilterNode::create):
* webaudio/OfflineAudioDestinationNode.h:
(WebCore::OfflineAudioDestinationNode::sampleRate):
* webaudio/RealtimeAnalyserNode.cpp:
(WebCore::RealtimeAnalyserNode::RealtimeAnalyserNode):
* webaudio/RealtimeAnalyserNode.h:
(WebCore::RealtimeAnalyserNode::create):
* webaudio/WaveShaperDSPKernel.cpp:
(WebCore::WaveShaperDSPKernel::process):
* webaudio/WaveShaperProcessor.cpp:
(WebCore::WaveShaperProcessor::WaveShaperProcessor):
* webaudio/WaveShaperProcessor.h:

Source/WebKit/chromium:

Platform-independent portions of WEB_AUDIO have changed from double -> float, and
platform-specific subclasses must change as well.

* src/AudioDestinationChromium.cpp:
(WebCore::AudioDestination::create):
(WebCore::AudioDestinationChromium::AudioDestinationChromium):
(WebCore::AudioDestination::hardwareSampleRate):
* src/AudioDestinationChromium.h:
(WebCore::AudioDestinationChromium::sampleRate):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@96745 268f45cc-cd09-0410-ab3c-d52691b4dbfc

93 files changed:
Source/WebCore/ChangeLog
Source/WebCore/platform/audio/AudioBus.cpp
Source/WebCore/platform/audio/AudioBus.h
Source/WebCore/platform/audio/AudioDSPKernel.h
Source/WebCore/platform/audio/AudioDSPKernelProcessor.cpp
Source/WebCore/platform/audio/AudioDSPKernelProcessor.h
Source/WebCore/platform/audio/AudioDestination.h
Source/WebCore/platform/audio/AudioFileReader.h
Source/WebCore/platform/audio/AudioProcessor.h
Source/WebCore/platform/audio/AudioUtilities.cpp
Source/WebCore/platform/audio/AudioUtilities.h
Source/WebCore/platform/audio/DynamicsCompressor.cpp
Source/WebCore/platform/audio/DynamicsCompressor.h
Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp
Source/WebCore/platform/audio/DynamicsCompressorKernel.h
Source/WebCore/platform/audio/EqualPowerPanner.cpp
Source/WebCore/platform/audio/EqualPowerPanner.h
Source/WebCore/platform/audio/HRTFDatabase.cpp
Source/WebCore/platform/audio/HRTFDatabase.h
Source/WebCore/platform/audio/HRTFDatabaseLoader.cpp
Source/WebCore/platform/audio/HRTFDatabaseLoader.h
Source/WebCore/platform/audio/HRTFElevation.cpp
Source/WebCore/platform/audio/HRTFElevation.h
Source/WebCore/platform/audio/HRTFKernel.cpp
Source/WebCore/platform/audio/HRTFKernel.h
Source/WebCore/platform/audio/HRTFPanner.cpp
Source/WebCore/platform/audio/HRTFPanner.h
Source/WebCore/platform/audio/Panner.cpp
Source/WebCore/platform/audio/Panner.h
Source/WebCore/platform/audio/chromium/AudioBusChromium.cpp
Source/WebCore/platform/audio/mac/AudioBusMac.mm
Source/WebCore/platform/audio/mac/AudioDestinationMac.cpp
Source/WebCore/platform/audio/mac/AudioDestinationMac.h
Source/WebCore/platform/audio/mac/AudioFileReaderMac.cpp
Source/WebCore/platform/audio/mac/AudioFileReaderMac.h
Source/WebCore/webaudio/AsyncAudioDecoder.cpp
Source/WebCore/webaudio/AsyncAudioDecoder.h
Source/WebCore/webaudio/AudioBasicProcessorNode.cpp
Source/WebCore/webaudio/AudioBasicProcessorNode.h
Source/WebCore/webaudio/AudioBuffer.cpp
Source/WebCore/webaudio/AudioBuffer.h
Source/WebCore/webaudio/AudioBufferSourceNode.cpp
Source/WebCore/webaudio/AudioBufferSourceNode.h
Source/WebCore/webaudio/AudioChannelMerger.cpp
Source/WebCore/webaudio/AudioChannelMerger.h
Source/WebCore/webaudio/AudioChannelSplitter.cpp
Source/WebCore/webaudio/AudioChannelSplitter.h
Source/WebCore/webaudio/AudioContext.cpp
Source/WebCore/webaudio/AudioContext.h
Source/WebCore/webaudio/AudioDestinationNode.cpp
Source/WebCore/webaudio/AudioDestinationNode.h
Source/WebCore/webaudio/AudioGainNode.cpp
Source/WebCore/webaudio/AudioGainNode.h
Source/WebCore/webaudio/AudioListener.cpp
Source/WebCore/webaudio/AudioListener.h
Source/WebCore/webaudio/AudioNode.cpp
Source/WebCore/webaudio/AudioNode.h
Source/WebCore/webaudio/AudioPannerNode.cpp
Source/WebCore/webaudio/AudioPannerNode.h
Source/WebCore/webaudio/AudioParam.cpp
Source/WebCore/webaudio/AudioParamTimeline.cpp
Source/WebCore/webaudio/AudioSourceNode.h
Source/WebCore/webaudio/BiquadFilterNode.cpp
Source/WebCore/webaudio/BiquadFilterNode.h
Source/WebCore/webaudio/BiquadProcessor.cpp
Source/WebCore/webaudio/BiquadProcessor.h
Source/WebCore/webaudio/ConvolverNode.cpp
Source/WebCore/webaudio/ConvolverNode.h
Source/WebCore/webaudio/DefaultAudioDestinationNode.cpp
Source/WebCore/webaudio/DefaultAudioDestinationNode.h
Source/WebCore/webaudio/DelayDSPKernel.cpp
Source/WebCore/webaudio/DelayDSPKernel.h
Source/WebCore/webaudio/DelayNode.cpp
Source/WebCore/webaudio/DelayNode.h
Source/WebCore/webaudio/DelayProcessor.cpp
Source/WebCore/webaudio/DelayProcessor.h
Source/WebCore/webaudio/DynamicsCompressorNode.cpp
Source/WebCore/webaudio/DynamicsCompressorNode.h
Source/WebCore/webaudio/HighPass2FilterNode.cpp
Source/WebCore/webaudio/HighPass2FilterNode.h
Source/WebCore/webaudio/JavaScriptAudioNode.cpp
Source/WebCore/webaudio/JavaScriptAudioNode.h
Source/WebCore/webaudio/LowPass2FilterNode.cpp
Source/WebCore/webaudio/LowPass2FilterNode.h
Source/WebCore/webaudio/OfflineAudioDestinationNode.h
Source/WebCore/webaudio/RealtimeAnalyserNode.cpp
Source/WebCore/webaudio/RealtimeAnalyserNode.h
Source/WebCore/webaudio/WaveShaperDSPKernel.cpp
Source/WebCore/webaudio/WaveShaperProcessor.cpp
Source/WebCore/webaudio/WaveShaperProcessor.h
Source/WebKit/chromium/ChangeLog
Source/WebKit/chromium/src/AudioDestinationChromium.cpp
Source/WebKit/chromium/src/AudioDestinationChromium.h

index e89d740..3b2bd91 100644 (file)
@@ -1,3 +1,228 @@
+2011-10-05  Jer Noble  <jer.noble@apple.com>
+
+        WEB_AUDIO does not compile on Leopard 32-bit.
+        https://bugs.webkit.org/show_bug.cgi?id=69292
+
+        Reviewed by Simon Fraser.
+
+        No new tests; covered by all existing audio tests.
+
+        Use of float and double within the WEB_AUDIO implementation have been harmonized, with most
+        calculations done using floats, with narrowPrecisionToFloat() added when necessary to
+        narrow double results down to floats, and with float constants initialized with float values:
+        * platform/audio/AudioBus.cpp:
+        (WebCore::AudioBus::AudioBus):
+        (WebCore::AudioBus::createByMixingToMono):
+        * platform/audio/AudioBus.h:
+        (WebCore::AudioBus::sampleRate):
+        (WebCore::AudioBus::setSampleRate):
+        * platform/audio/AudioDSPKernel.h:
+        (WebCore::AudioDSPKernel::AudioDSPKernel):
+        (WebCore::AudioDSPKernel::sampleRate):
+        * platform/audio/AudioDSPKernelProcessor.cpp:
+        (WebCore::AudioDSPKernelProcessor::AudioDSPKernelProcessor):
+        * platform/audio/AudioDSPKernelProcessor.h:
+        * platform/audio/AudioDestination.h:
+        * platform/audio/AudioFileReader.h:
+        * platform/audio/AudioProcessor.h:
+        (WebCore::AudioProcessor::AudioProcessor):
+        (WebCore::AudioProcessor::sampleRate):
+        * platform/audio/AudioUtilities.cpp:
+        (WebCore::AudioUtilities::decibelsToLinear):
+        (WebCore::AudioUtilities::linearToDecibels):
+        (WebCore::AudioUtilities::discreteTimeConstantForSampleRate):
+        * platform/audio/AudioUtilities.h:
+        * platform/audio/DynamicsCompressor.cpp:
+        (WebCore::DynamicsCompressor::DynamicsCompressor):
+        (WebCore::DynamicsCompressor::initializeParameters):
+        (WebCore::DynamicsCompressor::parameterValue):
+        (WebCore::DynamicsCompressor::setEmphasisStageParameters):
+        (WebCore::DynamicsCompressor::process):
+        * platform/audio/DynamicsCompressor.h:
+        (WebCore::DynamicsCompressor::sampleRate):
+        (WebCore::DynamicsCompressor::nyquist):
+        * platform/audio/DynamicsCompressorKernel.cpp:
+        (WebCore::saturate):
+        (WebCore::DynamicsCompressorKernel::DynamicsCompressorKernel):
+        (WebCore::DynamicsCompressorKernel::process):
+        * platform/audio/DynamicsCompressorKernel.h:
+        * platform/audio/EqualPowerPanner.cpp:
+        (WebCore::EqualPowerPanner::EqualPowerPanner):
+        * platform/audio/EqualPowerPanner.h:
+        * platform/audio/HRTFDatabase.cpp:
+        (WebCore::HRTFDatabase::create):
+        (WebCore::HRTFDatabase::HRTFDatabase):
+        * platform/audio/HRTFDatabase.h:
+        (WebCore::HRTFDatabase::sampleRate):
+        * platform/audio/HRTFDatabaseLoader.cpp:
+        (WebCore::HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary):
+        (WebCore::HRTFDatabaseLoader::HRTFDatabaseLoader):
+        * platform/audio/HRTFDatabaseLoader.h:
+        (WebCore::HRTFDatabaseLoader::databaseSampleRate):
+        * platform/audio/HRTFElevation.cpp:
+        (WebCore::HRTFElevation::calculateSymmetricKernelsForAzimuthElevation):
+        (WebCore::HRTFElevation::calculateKernelsForAzimuthElevation):
+        (WebCore::HRTFElevation::createForSubject):
+        (WebCore::HRTFElevation::createByInterpolatingSlices):
+        * platform/audio/HRTFElevation.h:
+        (WebCore::HRTFElevation::sampleRate):
+        (WebCore::HRTFElevation::HRTFElevation):
+        * platform/audio/HRTFKernel.cpp:
+        (WebCore::extractAverageGroupDelay):
+        (WebCore::HRTFKernel::HRTFKernel):
+        (WebCore::HRTFKernel::createInterpolatedKernel):
+        * platform/audio/HRTFKernel.h:
+        (WebCore::HRTFKernel::create):
+        (WebCore::HRTFKernel::frameDelay):
+        (WebCore::HRTFKernel::sampleRate):
+        (WebCore::HRTFKernel::HRTFKernel):
+        * platform/audio/HRTFPanner.cpp:
+        (WebCore::HRTFPanner::HRTFPanner):
+        (WebCore::HRTFPanner::fftSizeForSampleRate):
+        * platform/audio/HRTFPanner.h:
+        (WebCore::HRTFPanner::sampleRate):
+        * platform/audio/Panner.cpp:
+        (WebCore::Panner::create):
+        * platform/audio/Panner.h:
+        * platform/audio/chromium/AudioBusChromium.cpp:
+        (WebCore::AudioBus::loadPlatformResource):
+        * platform/audio/mac/AudioBusMac.mm:
+        (WebCore::AudioBus::loadPlatformResource):
+        * platform/audio/mac/AudioDestinationMac.cpp:
+        (WebCore::AudioDestination::create):
+        (WebCore::AudioDestination::hardwareSampleRate):
+        (WebCore::AudioDestinationMac::AudioDestinationMac):
+        * platform/audio/mac/AudioDestinationMac.h:
+        (WebCore::AudioDestinationMac::sampleRate):
+        * platform/audio/mac/AudioFileReaderMac.cpp:
+        (WebCore::AudioFileReader::createBus):
+        (WebCore::createBusFromAudioFile):
+        (WebCore::createBusFromInMemoryAudioFile):
+        * platform/audio/mac/AudioFileReaderMac.h:
+        * webaudio/AsyncAudioDecoder.cpp:
+        (WebCore::AsyncAudioDecoder::decodeAsync):
+        (WebCore::AsyncAudioDecoder::DecodingTask::create):
+        (WebCore::AsyncAudioDecoder::DecodingTask::DecodingTask):
+        * webaudio/AsyncAudioDecoder.h:
+        (WebCore::AsyncAudioDecoder::DecodingTask::sampleRate):
+        * webaudio/AudioBasicProcessorNode.cpp:
+        (WebCore::AudioBasicProcessorNode::AudioBasicProcessorNode):
+        * webaudio/AudioBasicProcessorNode.h:
+        * webaudio/AudioBuffer.cpp:
+        (WebCore::AudioBuffer::create):
+        (WebCore::AudioBuffer::createFromAudioFileData):
+        (WebCore::AudioBuffer::AudioBuffer):
+        * webaudio/AudioBuffer.h:
+        (WebCore::AudioBuffer::sampleRate):
+        * webaudio/AudioBufferSourceNode.cpp:
+        (WebCore::AudioBufferSourceNode::create):
+        (WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
+        (WebCore::AudioBufferSourceNode::process):
+        (WebCore::AudioBufferSourceNode::renderFromBuffer):
+        * webaudio/AudioBufferSourceNode.h:
+        * webaudio/AudioChannelMerger.cpp:
+        (WebCore::AudioChannelMerger::AudioChannelMerger):
+        * webaudio/AudioChannelMerger.h:
+        (WebCore::AudioChannelMerger::create):
+        * webaudio/AudioChannelSplitter.cpp:
+        (WebCore::AudioChannelSplitter::AudioChannelSplitter):
+        * webaudio/AudioChannelSplitter.h:
+        (WebCore::AudioChannelSplitter::create):
+        * webaudio/AudioContext.cpp:
+        (WebCore::AudioContext::createOfflineContext):
+        (WebCore::AudioContext::AudioContext):
+        (WebCore::AudioContext::createBuffer):
+        * webaudio/AudioContext.h:
+        (WebCore::AudioContext::sampleRate):
+        * webaudio/AudioDestinationNode.cpp:
+        (WebCore::AudioDestinationNode::AudioDestinationNode):
+        * webaudio/AudioDestinationNode.h:
+        * webaudio/AudioGainNode.cpp:
+        (WebCore::AudioGainNode::AudioGainNode):
+        * webaudio/AudioGainNode.h:
+        (WebCore::AudioGainNode::create):
+        * webaudio/AudioListener.cpp:
+        (WebCore::AudioListener::AudioListener):
+        * webaudio/AudioListener.h:
+        (WebCore::AudioListener::setPosition):
+        (WebCore::AudioListener::setOrientation):
+        (WebCore::AudioListener::setVelocity):
+        * webaudio/AudioNode.cpp:
+        (WebCore::AudioNode::AudioNode):
+        * webaudio/AudioNode.h:
+        (WebCore::AudioNode::sampleRate):
+        * webaudio/AudioPannerNode.cpp:
+        (WebCore::AudioPannerNode::AudioPannerNode):
+        (WebCore::AudioPannerNode::getAzimuthElevation):
+        * webaudio/AudioPannerNode.h:
+        (WebCore::AudioPannerNode::create):
+        * webaudio/AudioParam.cpp:
+        (WebCore::AudioParam::value):
+        (WebCore::AudioParam::smoothedValue):
+        (WebCore::AudioParam::smooth):
+        (WebCore::AudioParam::calculateSampleAccurateValues):
+        * webaudio/AudioParamTimeline.cpp:
+        (WebCore::AudioParamTimeline::valueForContextTime):
+        (WebCore::timeToSampleFrame):
+        (WebCore::AudioParamTimeline::valuesForTimeRangeImpl):
+        * webaudio/AudioSourceNode.h:
+        (WebCore::AudioSourceNode::AudioSourceNode):
+        * webaudio/BiquadFilterNode.cpp:
+        (WebCore::BiquadFilterNode::BiquadFilterNode):
+        * webaudio/BiquadFilterNode.h:
+        (WebCore::BiquadFilterNode::create):
+        * webaudio/BiquadProcessor.cpp:
+        (WebCore::BiquadProcessor::BiquadProcessor):
+        * webaudio/BiquadProcessor.h:
+        * webaudio/ConvolverNode.cpp:
+        (WebCore::ConvolverNode::ConvolverNode):
+        * webaudio/ConvolverNode.h:
+        (WebCore::ConvolverNode::create):
+        * webaudio/DefaultAudioDestinationNode.cpp:
+        (WebCore::DefaultAudioDestinationNode::initialize):
+        * webaudio/DefaultAudioDestinationNode.h:
+        (WebCore::DefaultAudioDestinationNode::sampleRate):
+        * webaudio/DelayDSPKernel.cpp:
+        (WebCore::DelayDSPKernel::DelayDSPKernel):
+        (WebCore::DelayDSPKernel::process):
+        * webaudio/DelayDSPKernel.h:
+        * webaudio/DelayNode.cpp:
+        (WebCore::DelayNode::DelayNode):
+        * webaudio/DelayNode.h:
+        (WebCore::DelayNode::create):
+        * webaudio/DelayProcessor.cpp:
+        (WebCore::DelayProcessor::DelayProcessor):
+        * webaudio/DelayProcessor.h:
+        * webaudio/DynamicsCompressorNode.cpp:
+        (WebCore::DynamicsCompressorNode::DynamicsCompressorNode):
+        * webaudio/DynamicsCompressorNode.h:
+        (WebCore::DynamicsCompressorNode::create):
+        * webaudio/HighPass2FilterNode.cpp:
+        (WebCore::HighPass2FilterNode::HighPass2FilterNode):
+        * webaudio/HighPass2FilterNode.h:
+        (WebCore::HighPass2FilterNode::create):
+        * webaudio/JavaScriptAudioNode.cpp:
+        (WebCore::JavaScriptAudioNode::create):
+        (WebCore::JavaScriptAudioNode::JavaScriptAudioNode):
+        (WebCore::JavaScriptAudioNode::initialize):
+        * webaudio/JavaScriptAudioNode.h:
+        * webaudio/LowPass2FilterNode.cpp:
+        (WebCore::LowPass2FilterNode::LowPass2FilterNode):
+        * webaudio/LowPass2FilterNode.h:
+        (WebCore::LowPass2FilterNode::create):
+        * webaudio/OfflineAudioDestinationNode.h:
+        (WebCore::OfflineAudioDestinationNode::sampleRate):
+        * webaudio/RealtimeAnalyserNode.cpp:
+        (WebCore::RealtimeAnalyserNode::RealtimeAnalyserNode):
+        * webaudio/RealtimeAnalyserNode.h:
+        (WebCore::RealtimeAnalyserNode::create):
+        * webaudio/WaveShaperDSPKernel.cpp:
+        (WebCore::WaveShaperDSPKernel::process):
+        * webaudio/WaveShaperProcessor.cpp:
+        (WebCore::WaveShaperProcessor::WaveShaperProcessor):
+        * webaudio/WaveShaperProcessor.h:
+
+
 2011-10-05  Alexey Proskuryakov  <ap@apple.com>
 
         [Mac] Make built-in PDF description localizable
index e34f7db..ad29af1 100644 (file)
@@ -48,9 +48,9 @@ using namespace VectorMath;
     
 AudioBus::AudioBus(unsigned numberOfChannels, size_t length, bool allocate)
     : m_length(length)
-    , m_busGain(1.0)
+    , m_busGain(1)
     , m_isFirstTime(true)
-    , m_sampleRate(0.0)
+    , m_sampleRate(0)
 {
     m_channels.reserveInitialCapacity(numberOfChannels);
 
@@ -466,7 +466,7 @@ PassOwnPtr<AudioBus> AudioBus::createByMixingToMono(AudioBus* sourceBus)
         
             // Do the mono mixdown.
             for (unsigned i = 0; i < n; ++i)
-                destination[i] = 0.5 * (sourceL[i] + sourceR[i]);
+                destination[i] = (sourceL[i] + sourceR[i]) / 2;
 
             destinationBus->setSampleRate(sourceBus->sampleRate());    
             return destinationBus.release();
index a87f55d..53fb4b9 100644 (file)
@@ -76,8 +76,8 @@ public:
     size_t length() const { return m_length; }
 
     // Sample-rate : 0.0 if unknown or "don't care"
-    double sampleRate() const { return m_sampleRate; }
-    void setSampleRate(double sampleRate) { m_sampleRate = sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
+    void setSampleRate(float sampleRate) { m_sampleRate = sampleRate; }
 
     // Zeroes all channels.
     void zero();
@@ -133,7 +133,7 @@ public:
     // Makes maximum absolute value == 1.0 (if possible).
     void normalize();
 
-    static PassOwnPtr<AudioBus> loadPlatformResource(const char* name, double sampleRate);
+    static PassOwnPtr<AudioBus> loadPlatformResource(const char* name, float sampleRate);
 
 protected:
     AudioBus() { };
@@ -149,7 +149,7 @@ protected:
 
     double m_busGain;
     bool m_isFirstTime;
-    double m_sampleRate; // 0.0 if unknown or N/A
+    float m_sampleRate; // 0.0 if unknown or N/A
 };
 
 } // WebCore
index d0719c5..f33c9ed 100644 (file)
@@ -45,7 +45,7 @@ public:
     {
     }
 
-    AudioDSPKernel(double sampleRate)
+    AudioDSPKernel(float sampleRate)
         : m_kernelProcessor(0)
         , m_sampleRate(sampleRate)
     {
@@ -57,7 +57,7 @@ public:
     virtual void process(const float* source, float* destination, size_t framesToProcess) = 0;
     virtual void reset() = 0;
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
     double nyquist() const { return 0.5 * sampleRate(); }
 
     AudioDSPKernelProcessor* processor() { return m_kernelProcessor; }
@@ -65,7 +65,7 @@ public:
 
 protected:
     AudioDSPKernelProcessor* m_kernelProcessor;
-    double m_sampleRate;
+    float m_sampleRate;
 };
 
 } // namespace WebCore
index 45068db..cf4d2d3 100644 (file)
@@ -39,7 +39,7 @@
 namespace WebCore {
 
 // setNumberOfChannels() may later be called if the object is not yet in an "initialized" state.
-AudioDSPKernelProcessor::AudioDSPKernelProcessor(double sampleRate, unsigned numberOfChannels)
+AudioDSPKernelProcessor::AudioDSPKernelProcessor(float sampleRate, unsigned numberOfChannels)
     : AudioProcessor(sampleRate)
     , m_numberOfChannels(numberOfChannels)
     , m_hasJustReset(true)
index e87a810..40b5ab8 100644 (file)
@@ -50,7 +50,7 @@ class AudioProcessor;
 class AudioDSPKernelProcessor : public AudioProcessor {
 public:
     // numberOfChannels may be later changed if object is not yet in an "initialized" state
-    AudioDSPKernelProcessor(double sampleRate, unsigned numberOfChannels);
+    AudioDSPKernelProcessor(float sampleRate, unsigned numberOfChannels);
 
     // Subclasses create the appropriate type of processing kernel here.
     // We'll call this to create a kernel for each channel.
index 9498110..75882c0 100644 (file)
@@ -41,7 +41,7 @@ class AudioSourceProvider;
 
 class AudioDestination {
 public:
-    static PassOwnPtr<AudioDestination> create(AudioSourceProvider&, double sampleRate);
+    static PassOwnPtr<AudioDestination> create(AudioSourceProvider&, float sampleRate);
 
     virtual ~AudioDestination() { }
 
@@ -50,8 +50,8 @@ public:
     virtual bool isPlaying() = 0;
 
     // Sample-rate conversion may happen in AudioDestination to the hardware sample-rate
-    virtual double sampleRate() const = 0;
-    static double hardwareSampleRate();
+    virtual float sampleRate() const = 0;
+    static float hardwareSampleRate();
 };
 
 } // namespace WebCore
index 3c02490..0c8dc6e 100644 (file)
@@ -42,9 +42,9 @@ class AudioBus;
 // sampleRate will be made (if it doesn't already match the file's sample-rate).
 // The created buffer will have its sample-rate set correctly to the result.
 
-PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, double sampleRate);
+PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate);
 
-PassOwnPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, double sampleRate);
+PassOwnPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, float sampleRate);
                                 
 // May pass in 0.0 for sampleRate in which case it will use the AudioBus's sampleRate                               
 void writeBusToAudioFile(AudioBus* bus, const char* filePath, double fileSampleRate);
index 69ba40f..2d7b60a 100644 (file)
@@ -41,7 +41,7 @@ class AudioBus;
 
 class AudioProcessor {
 public:
-    AudioProcessor(double sampleRate)
+    AudioProcessor(float sampleRate)
         : m_initialized(false)
         , m_sampleRate(sampleRate)
     {
@@ -63,11 +63,11 @@ public:
 
     bool isInitialized() const { return m_initialized; }
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
 
 protected:
     bool m_initialized;
-    double m_sampleRate;
+    float m_sampleRate;
 };
 
 } // namespace WebCore
index 7a4b32e..1a02b7e 100644 (file)
@@ -33,27 +33,27 @@ namespace WebCore {
 
 namespace AudioUtilities {
 
-double decibelsToLinear(double decibels)
+float decibelsToLinear(float decibels)
 {
-    return pow(10.0, 0.05 * decibels);
+    return powf(10, 0.05f * decibels);
 }
 
-double linearToDecibels(double linear)
+float linearToDecibels(float linear)
 {
     // It's not possible to calculate decibels for a zero linear value since it would be -Inf.
     // -1000.0 dB represents a very tiny linear value in case we ever reach this case.
     ASSERT(linear);
     if (!linear)
-        return -1000.0;
+        return -1000;
         
-    return 20.0 * log10(linear);
+    return 20 * log10f(linear);
 }
 
-double discreteTimeConstantForSampleRate(double timeConstant, double sampleRate)
+float discreteTimeConstantForSampleRate(float timeConstant, float sampleRate)
 {
     // hardcoded value is temporary build fix for Windows.
     // FIXME: replace hardcode 2.718282 with M_E until the correct MathExtras.h solution is determined.
-    return 1.0 - pow(1.0 / 2.718282, 1.0 / (sampleRate * timeConstant));
+    return 1 - powf(1 / 2.718282f, 1 / (sampleRate * timeConstant));
 }
     
 } // AudioUtilites
index 7cf44ce..c98a4c8 100644 (file)
@@ -30,13 +30,13 @@ namespace WebCore {
 namespace AudioUtilities {
 
 // Standard functions for converting to and from decibel values from linear.
-double linearToDecibels(double);
-double decibelsToLinear(double);
+float linearToDecibels(float);
+float decibelsToLinear(float);
 
 // timeConstant is the time it takes a first-order linear time-invariant system
 // to reach the value 1 - 1/e (around 63.2%) given a step input response.
 // discreteTimeConstantForSampleRate() will return the discrete time-constant for the specific sampleRate.
-double discreteTimeConstantForSampleRate(double timeConstant, double sampleRate);
+float discreteTimeConstantForSampleRate(float timeConstant, float sampleRate);
     
 } // AudioUtilites
 
index 15eec9f..c4795f4 100644 (file)
@@ -40,7 +40,7 @@ namespace WebCore {
 
 using namespace AudioUtilities;
     
-DynamicsCompressor::DynamicsCompressor(bool isStereo, double sampleRate)
+DynamicsCompressor::DynamicsCompressor(bool isStereo, float sampleRate)
     : m_isStereo(isStereo)
     , m_sampleRate(sampleRate)
     , m_compressor(sampleRate)
@@ -59,17 +59,17 @@ void DynamicsCompressor::initializeParameters()
     
     m_parameters[ParamThreshold] = -24; // dB
     m_parameters[ParamHeadroom] = 21; // dB
-    m_parameters[ParamAttack] = 0.003; // seconds
-    m_parameters[ParamRelease] = 0.250; // seconds
-    m_parameters[ParamPreDelay] = 0.006; // seconds
+    m_parameters[ParamAttack] = 0.003f; // seconds
+    m_parameters[ParamRelease] = 0.250f; // seconds
+    m_parameters[ParamPreDelay] = 0.006f; // seconds
 
     // Release zone values 0 -> 1.
-    m_parameters[ParamReleaseZone1] = 0.09;
-    m_parameters[ParamReleaseZone2] = 0.16;
-    m_parameters[ParamReleaseZone3] = 0.42;
-    m_parameters[ParamReleaseZone4] = 0.98;
+    m_parameters[ParamReleaseZone1] = 0.09f;
+    m_parameters[ParamReleaseZone2] = 0.16f;
+    m_parameters[ParamReleaseZone3] = 0.42f;
+    m_parameters[ParamReleaseZone4] = 0.98f;
 
-    m_parameters[ParamFilterStageGain] = 4.4; // dB
+    m_parameters[ParamFilterStageGain] = 4.4f; // dB
     m_parameters[ParamFilterStageRatio] = 2;
     m_parameters[ParamFilterAnchor] = 15000 / nyquist();
     
@@ -79,7 +79,7 @@ void DynamicsCompressor::initializeParameters()
     m_parameters[ParamEffectBlend] = 1;
 }
 
-double DynamicsCompressor::parameterValue(unsigned parameterID)
+float DynamicsCompressor::parameterValue(unsigned parameterID)
 {
     ASSERT(parameterID < ParamLast);
     return m_parameters[parameterID];
@@ -90,8 +90,8 @@ void DynamicsCompressor::setEmphasisStageParameters(unsigned stageIndex, float g
     float gk = 1 - gain / 20;
     float f1 = normalizedFrequency * gk;
     float f2 = normalizedFrequency / gk;
-    float r1 = exp(-f1 * piDouble);
-    float r2 = exp(-f2 * piDouble);
+    float r1 = expf(-f1 * piFloat);
+    float r2 = expf(-f2 * piFloat);
 
     // Set pre-filter zero and pole to create an emphasis filter.
     m_preFilter[stageIndex].setZero(r1);
@@ -170,10 +170,10 @@ void DynamicsCompressor::process(AudioBus* sourceBus, AudioBus* destinationBus,
     // 1 mixes in only the compressed signal.
     float effectBlend = parameterValue(ParamEffectBlend);
 
-    double releaseZone1 = parameterValue(ParamReleaseZone1);
-    double releaseZone2 = parameterValue(ParamReleaseZone2);
-    double releaseZone3 = parameterValue(ParamReleaseZone3);
-    double releaseZone4 = parameterValue(ParamReleaseZone4);
+    float releaseZone1 = parameterValue(ParamReleaseZone1);
+    float releaseZone2 = parameterValue(ParamReleaseZone2);
+    float releaseZone3 = parameterValue(ParamReleaseZone3);
+    float releaseZone4 = parameterValue(ParamReleaseZone4);
 
     // Apply compression to the pre-filtered signal.
     // The processing is performed in place.
index d0c036f..2152951 100644 (file)
@@ -62,26 +62,26 @@ public:
         ParamLast
     };
 
-    DynamicsCompressor(bool isStereo, double sampleRate);
+    DynamicsCompressor(bool isStereo, float sampleRate);
 
     void process(AudioBus* sourceBus, AudioBus* destinationBus, unsigned framesToProcess);
     void reset();
 
-    double parameterValue(unsigned parameterID);
+    float parameterValue(unsigned parameterID);
 
     bool isStereo() const { return m_isStereo; }
-    double sampleRate() const { return m_sampleRate; }
-    double nyquist() const { return 0.5 * m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
+    float nyquist() const { return m_sampleRate / 2; }
 
 protected:
     // m_parameters holds the tweakable compressor parameters.
     // FIXME: expose some of the most important ones (such as threshold, attack, release)
     // as DynamicsCompressorNode attributes.
-    double m_parameters[ParamLast];
+    float m_parameters[ParamLast];
     void initializeParameters();
 
     bool m_isStereo;
-    double m_sampleRate;
+    float m_sampleRate;
 
     // Emphasis filter controls.
     float m_lastFilterStageRatio;
index a072346..2229c38 100644 (file)
@@ -43,15 +43,15 @@ namespace WebCore {
 using namespace AudioUtilities;
 
 // Metering hits peaks instantly, but releases this fast (in seconds).
-const double meteringReleaseTimeConstant = 0.325;
+const float meteringReleaseTimeConstant = 0.325f;
     
 // Exponential saturation curve.
-static double saturate(double x, double k)
+static float saturate(float x, float k)
 {
-    return 1 - exp(-k * x);
+    return 1 - expf(-k * x);
 }
 
-DynamicsCompressorKernel::DynamicsCompressorKernel(double sampleRate)
+DynamicsCompressorKernel::DynamicsCompressorKernel(float sampleRate)
     : m_sampleRate(sampleRate)
     , m_lastPreDelayFrames(DefaultPreDelayFrames)
     , m_preDelayBufferL(MaxPreDelayFrames)
@@ -108,18 +108,18 @@ void DynamicsCompressorKernel::process(float* sourceL,
     float wetMix = effectBlend;
 
     // Threshold and headroom.
-    double linearThreshold = decibelsToLinear(dbThreshold);
-    double linearHeadroom = decibelsToLinear(dbHeadroom);
+    float linearThreshold = decibelsToLinear(dbThreshold);
+    float linearHeadroom = decibelsToLinear(dbHeadroom);
 
     // Makeup gain.
-    double maximum = 1.05 * linearHeadroom * linearThreshold;
-    double kk = (maximum - linearThreshold);
-    double inverseKK = 1 / kk;
+    float maximum = 1.05f * linearHeadroom * linearThreshold;
+    float kk = (maximum - linearThreshold);
+    float inverseKK = 1 / kk;
 
-    double fullRangeGain = (linearThreshold + kk * saturate(1 - linearThreshold, 1));
-    double fullRangeMakeupGain = 1 / fullRangeGain;
+    float fullRangeGain = (linearThreshold + kk * saturate(1 - linearThreshold, 1));
+    float fullRangeMakeupGain = 1 / fullRangeGain;
     // Empirical/perceptual tuning.
-    fullRangeMakeupGain = pow(fullRangeMakeupGain, 0.6);
+    fullRangeMakeupGain = powf(fullRangeMakeupGain, 0.6f);
 
     float masterLinearGain = decibelsToLinear(dbPostGain) * fullRangeMakeupGain;
 
@@ -131,26 +131,26 @@ void DynamicsCompressorKernel::process(float* sourceL,
     float releaseFrames = sampleRate * releaseTime;
     
     // Detector release time.
-    double satReleaseTime = 0.0025;
-    double satReleaseFrames = satReleaseTime * sampleRate;
+    float satReleaseTime = 0.0025f;
+    float satReleaseFrames = satReleaseTime * sampleRate;
 
     // Create a smooth function which passes through four points.
 
     // Polynomial of the form
     // y = a + b*x + c*x^2 + d*x^3 + e*x^4;
 
-    double y1 = releaseFrames * releaseZone1;
-    double y2 = releaseFrames * releaseZone2;
-    double y3 = releaseFrames * releaseZone3;
-    double y4 = releaseFrames * releaseZone4;
+    float y1 = releaseFrames * releaseZone1;
+    float y2 = releaseFrames * releaseZone2;
+    float y3 = releaseFrames * releaseZone3;
+    float y4 = releaseFrames * releaseZone4;
 
     // All of these coefficients were derived for 4th order polynomial curve fitting where the y values
     // match the evenly spaced x values as follows: (y1 : x == 0, y2 : x == 1, y3 : x == 2, y4 : x == 3)
-    double kA = 0.9999999999999998*y1 + 1.8432219684323923e-16*y2 - 1.9373394351676423e-16*y3 + 8.824516011816245e-18*y4;
-    double kB = -1.5788320352845888*y1 + 2.3305837032074286*y2 - 0.9141194204840429*y3 + 0.1623677525612032*y4;
-    double kC = 0.5334142869106424*y1 - 1.272736789213631*y2 + 0.9258856042207512*y3 - 0.18656310191776226*y4;
-    double kD = 0.08783463138207234*y1 - 0.1694162967925622*y2 + 0.08588057951595272*y3 - 0.00429891410546283*y4;
-    double kE = -0.042416883008123074*y1 + 0.1115693827987602*y2 - 0.09764676325265872*y3 + 0.028494263462021576*y4;
+    float kA = 0.9999999999999998f*y1 + 1.8432219684323923e-16f*y2 - 1.9373394351676423e-16f*y3 + 8.824516011816245e-18f*y4;
+    float kB = -1.5788320352845888f*y1 + 2.3305837032074286f*y2 - 0.9141194204840429f*y3 + 0.1623677525612032f*y4;
+    float kC = 0.5334142869106424f*y1 - 1.272736789213631f*y2 + 0.9258856042207512f*y3 - 0.18656310191776226f*y4;
+    float kD = 0.08783463138207234f*y1 - 0.1694162967925622f*y2 + 0.08588057951595272f*y3 - 0.00429891410546283f*y4;
+    float kE = -0.042416883008123074f*y1 + 0.1115693827987602f*y2 - 0.09764676325265872f*y3 + 0.028494263462021576f*y4;
 
     // x ranges from 0 -> 3       0    1    2   3
     //                           -15  -10  -5   0db
@@ -177,7 +177,7 @@ void DynamicsCompressorKernel::process(float* sourceL,
         float desiredGain = m_detectorAverage;
 
         // Pre-warp so we get desiredGain after sin() warp below.
-        double scaledDesiredGain = asin(desiredGain) / (0.5 * piDouble);
+        float scaledDesiredGain = asinf(desiredGain) / (0.5f * piFloat);
 
         // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
         // Deal with envelopes
@@ -190,7 +190,7 @@ void DynamicsCompressorKernel::process(float* sourceL,
         bool isReleasing = scaledDesiredGain > m_compressorGain;
 
         // compressionDiffDb is the difference between current compression level and the desired level.
-        double compressionDiffDb = linearToDecibels(m_compressorGain / scaledDesiredGain);
+        float compressionDiffDb = linearToDecibels(m_compressorGain / scaledDesiredGain);
 
         if (isReleasing) {
             // Release mode - compressionDiffDb should be negative dB
@@ -205,20 +205,20 @@ void DynamicsCompressorKernel::process(float* sourceL,
             // Adaptive release - higher compression (lower compressionDiffDb)  releases faster.
 
             // Contain within range: -12 -> 0 then scale to go from 0 -> 3
-            double x = compressionDiffDb;
-            x = max(-12., x);
-            x = min(0., x);
-            x = 0.25 * (x + 12);
+            float x = compressionDiffDb;
+            x = max(-12.0f, x);
+            x = min(0.0f, x);
+            x = 0.25f * (x + 12);
 
             // Compute adaptive release curve using 4th order polynomial.
             // Normal values for the polynomial coefficients would create a monotonically increasing function.
-            double x2 = x * x;
-            double x3 = x2 * x;
-            double x4 = x2 * x2;
-            double releaseFrames = kA + kB * x + kC * x2 + kD * x3 + kE * x4;
+            float x2 = x * x;
+            float x3 = x2 * x;
+            float x4 = x2 * x2;
+            float releaseFrames = kA + kB * x + kC * x2 + kD * x3 + kE * x4;
 
 #define kSpacingDb 5
-            double dbPerFrame = kSpacingDb / releaseFrames;
+            float dbPerFrame = kSpacingDb / releaseFrames;
 
             envelopeRate = decibelsToLinear(dbPerFrame);
         } else {
@@ -235,10 +235,10 @@ void DynamicsCompressorKernel::process(float* sourceL,
             if (m_maxAttackCompressionDiffDb == -1 || m_maxAttackCompressionDiffDb < compressionDiffDb)
                 m_maxAttackCompressionDiffDb = compressionDiffDb;
 
-            double effAttenDiffDb = max(0.5f, m_maxAttackCompressionDiffDb);
+            float effAttenDiffDb = max(0.5f, m_maxAttackCompressionDiffDb);
 
-            double x = 0.25 / effAttenDiffDb;
-            envelopeRate = 1 - pow(x, double(1 / attackFrames));
+            float x = 0.25f / effAttenDiffDb;
+            envelopeRate = 1 - powf(x, 1 / attackFrames);
         }
 
         // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -264,7 +264,7 @@ void DynamicsCompressorKernel::process(float* sourceL,
                     float undelayedL = *sourceL++;
                     float undelayedR = *sourceR++;
 
-                    compressorInput = 0.5 * (undelayedL + undelayedR);
+                    compressorInput = 0.5f * (undelayedL + undelayedR);
 
                     inputL = delayBufferL[preDelayReadIndex];
                     inputR = delayBufferR[preDelayReadIndex];
@@ -284,24 +284,24 @@ void DynamicsCompressorKernel::process(float* sourceL,
                 // Calculate shaped power on undelayed input.
 
                 float scaledInput = compressorInput;
-                double absInput = scaledInput > 0 ? scaledInput : -scaledInput;
+                float absInput = scaledInput > 0 ? scaledInput : -scaledInput;
 
                 // Put through shaping curve.
                 // This is linear up to the threshold, then exponentially approaches the maximum (headroom amount above threshold).
                 // The transition from the threshold to the exponential portion is smooth (1st derivative matched).
-                double shapedInput = absInput < linearThreshold ? absInput : linearThreshold + kk * saturate(absInput - linearThreshold, inverseKK);
+                float shapedInput = absInput < linearThreshold ? absInput : linearThreshold + kk * saturate(absInput - linearThreshold, inverseKK);
 
-                double attenuation = absInput <= 0.0001 ? 1 : shapedInput / absInput;
+                float attenuation = absInput <= 0.0001f ? 1 : shapedInput / absInput;
 
-                double attenuationDb = -linearToDecibels(attenuation);
-                attenuationDb = max(2., attenuationDb);
+                float attenuationDb = -linearToDecibels(attenuation);
+                attenuationDb = max(2.0f, attenuationDb);
 
-                double dbPerFrame = attenuationDb / satReleaseFrames;
+                float dbPerFrame = attenuationDb / satReleaseFrames;
 
-                double satReleaseRate = decibelsToLinear(dbPerFrame) - 1;
+                float satReleaseRate = decibelsToLinear(dbPerFrame) - 1;
 
                 bool isRelease = (attenuation > detectorAverage);
-                double rate = isRelease ? satReleaseRate : 1;
+                float rate = isRelease ? satReleaseRate : 1;
 
                 detectorAverage += (attenuation - detectorAverage) * rate;
                 detectorAverage = min(1.0f, detectorAverage);
@@ -323,13 +323,13 @@ void DynamicsCompressorKernel::process(float* sourceL,
                 }
 
                 // Warp pre-compression gain to smooth out sharp exponential transition points.
-                double postWarpCompressorGain = sin(0.5 * piDouble * compressorGain);
+                float postWarpCompressorGain = sinf(0.5f * piFloat * compressorGain);
 
                 // Calculate total gain using master gain and effect blend.
-                double totalGain = dryMix + wetMix * masterLinearGain * postWarpCompressorGain;
+                float totalGain = dryMix + wetMix * masterLinearGain * postWarpCompressorGain;
 
                 // Calculate metering.
-                double dbRealGain = 20 * log10(postWarpCompressorGain);
+                float dbRealGain = 20 * log10f(postWarpCompressorGain);
                 if (dbRealGain < m_meteringGain)
                     m_meteringGain = dbRealGain;
                 else
index a6e70d3..8e5f709 100644 (file)
@@ -35,7 +35,7 @@ namespace WebCore {
 
 class DynamicsCompressorKernel {
 public:
-    DynamicsCompressorKernel(double sampleRate);
+    DynamicsCompressorKernel(float sampleRate);
 
     // Performs stereo-linked compression.
     void process(float *sourceL,
index e2c2a5c..13a19d6 100644 (file)
 #include <wtf/MathExtras.h>
 
 // Use a 50ms smoothing / de-zippering time-constant.
-const double SmoothingTimeConstant = 0.050;
+const float SmoothingTimeConstant = 0.050f;
 
 using namespace std;
 
 namespace WebCore {
 
-EqualPowerPanner::EqualPowerPanner(double sampleRate)
+EqualPowerPanner::EqualPowerPanner(float sampleRate)
     : Panner(PanningModelEqualPower)
     , m_isFirstRender(true)
     , m_gainL(0.0)
index f20617e..4f6001d 100644 (file)
@@ -33,7 +33,7 @@ namespace WebCore {
 
 class EqualPowerPanner : public Panner {
 public:
-    EqualPowerPanner(double sampleRate);
+    EqualPowerPanner(float sampleRate);
 
     virtual void pan(double azimuth, double elevation, AudioBus* inputBus, AudioBus* outputBuf, size_t framesToProcess);
 
index ef1229f..8558698 100644 (file)
@@ -45,13 +45,13 @@ const unsigned HRTFDatabase::NumberOfRawElevations = 10; // -45 -> +90 (each 15
 const unsigned HRTFDatabase::InterpolationFactor = 1;
 const unsigned HRTFDatabase::NumberOfTotalElevations = NumberOfRawElevations * InterpolationFactor;
 
-PassOwnPtr<HRTFDatabase> HRTFDatabase::create(double sampleRate)
+PassOwnPtr<HRTFDatabase> HRTFDatabase::create(float sampleRate)
 {
     OwnPtr<HRTFDatabase> hrtfDatabase = adoptPtr(new HRTFDatabase(sampleRate));
     return hrtfDatabase.release();
 }
 
-HRTFDatabase::HRTFDatabase(double sampleRate)
+HRTFDatabase::HRTFDatabase(float sampleRate)
     : m_elevations(NumberOfTotalElevations)
     , m_sampleRate(sampleRate)
 {
@@ -75,7 +75,7 @@ HRTFDatabase::HRTFDatabase(double sampleRate)
 
             // Create the interpolated convolution kernels and delays.
             for (unsigned jj = 1; jj < InterpolationFactor; ++jj) {
-                double x = static_cast<double>(jj) / static_cast<double>(InterpolationFactor);
+                float x = static_cast<float>(jj) / static_cast<float>(InterpolationFactor);
                 m_elevations[i + jj] = HRTFElevation::createByInterpolatingSlices(m_elevations[i].get(), m_elevations[j].get(), x, sampleRate);
                 ASSERT(m_elevations[i + jj].get());
             }
index bf13a3a..bb7cf53 100644 (file)
@@ -46,7 +46,7 @@ class HRTFKernel;
 class HRTFDatabase {
     WTF_MAKE_NONCOPYABLE(HRTFDatabase);
 public:
-    static PassOwnPtr<HRTFDatabase> create(double sampleRate);
+    static PassOwnPtr<HRTFDatabase> create(float sampleRate);
 
     // getKernelsFromAzimuthElevation() returns a left and right ear kernel, and an interpolated left and right frame delay for the given azimuth and elevation.
     // azimuthBlend must be in the range 0 -> 1.
@@ -57,10 +57,10 @@ public:
     // Returns the number of different azimuth angles.
     static unsigned numberOfAzimuths() { return HRTFElevation::NumberOfTotalAzimuths; }
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
     
 private:
-    explicit HRTFDatabase(double sampleRate);
+    explicit HRTFDatabase(float sampleRate);
 
     // Minimum and maximum elevation angles (inclusive) for a HRTFDatabase.
     static const int MinElevation;
@@ -80,7 +80,7 @@ private:
     static unsigned indexFromElevationAngle(double);
 
     Vector<OwnPtr<HRTFElevation> > m_elevations;                                            
-    double m_sampleRate;
+    float m_sampleRate;
 };
 
 } // namespace WebCore
index 9acdc32..cb7355a 100644 (file)
@@ -40,7 +40,7 @@ namespace WebCore {
 // Singleton
 HRTFDatabaseLoader* HRTFDatabaseLoader::s_loader = 0;
 
-PassRefPtr<HRTFDatabaseLoader> HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(double sampleRate)
+PassRefPtr<HRTFDatabaseLoader> HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(float sampleRate)
 {
     ASSERT(isMainThread());
 
@@ -59,7 +59,7 @@ PassRefPtr<HRTFDatabaseLoader> HRTFDatabaseLoader::createAndLoadAsynchronouslyIf
     return loader;
 }
 
-HRTFDatabaseLoader::HRTFDatabaseLoader(double sampleRate)
+HRTFDatabaseLoader::HRTFDatabaseLoader(float sampleRate)
     : m_databaseLoaderThread(0)
     , m_databaseSampleRate(sampleRate)
 {
index ee7eb22..e67707e 100644 (file)
@@ -44,7 +44,7 @@ public:
     // Lazily creates the singleton HRTFDatabaseLoader (if not already created) and starts loading asynchronously (when created the first time).
     // Returns the singleton HRTFDatabaseLoader.
     // Must be called from the main thread.
-    static PassRefPtr<HRTFDatabaseLoader> createAndLoadAsynchronouslyIfNecessary(double sampleRate);
+    static PassRefPtr<HRTFDatabaseLoader> createAndLoadAsynchronouslyIfNecessary(float sampleRate);
 
     // Returns the singleton HRTFDatabaseLoader.
     static HRTFDatabaseLoader* loader() { return s_loader; }
@@ -60,7 +60,7 @@ public:
     
     HRTFDatabase* database() { return m_hrtfDatabase.get(); }
 
-    double databaseSampleRate() const { return m_databaseSampleRate; }
+    float databaseSampleRate() const { return m_databaseSampleRate; }
     
     // Called in asynchronous loading thread.
     void load();
@@ -72,7 +72,7 @@ public:
 
 private:
     // Both constructor and destructor must be called from the main thread.
-    explicit HRTFDatabaseLoader(double sampleRate);    
+    explicit HRTFDatabaseLoader(float sampleRate);
     
     // If it hasn't already been loaded, creates a new thread and initiates asynchronous loading of the default database.
     // This must be called from the main thread.
@@ -85,7 +85,7 @@ private:
     Mutex m_threadLock;
     ThreadIdentifier m_databaseLoaderThread;
 
-    double m_databaseSampleRate;    
+    float m_databaseSampleRate;
 };
 
 } // namespace WebCore
index b0db862..cf21255 100644 (file)
@@ -52,7 +52,7 @@ const unsigned HRTFElevation::NumberOfTotalAzimuths = NumberOfRawAzimuths * Inte
 
 // Takes advantage of the symmetry and creates a composite version of the two measured versions.  For example, we have both azimuth 30 and -30 degrees
 // where the roles of left and right ears are reversed with respect to each other.
-bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, double sampleRate, const String& subjectName,
+bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
                                                                  RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
 {
     RefPtr<HRTFKernel> kernelL1;
@@ -71,13 +71,13 @@ bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, in
         return false;
         
     // Notice L/R reversal in symmetric version.
-    kernelL = HRTFKernel::createInterpolatedKernel(kernelL1.get(), kernelR2.get(), 0.5);
-    kernelR = HRTFKernel::createInterpolatedKernel(kernelR1.get(), kernelL2.get(), 0.5);
+    kernelL = HRTFKernel::createInterpolatedKernel(kernelL1.get(), kernelR2.get(), 0.5f);
+    kernelR = HRTFKernel::createInterpolatedKernel(kernelR1.get(), kernelL2.get(), 0.5f);
     
     return true;
 }
 
-bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevation, double sampleRate, const String& subjectName,
+bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
                                                         RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
 {
     // Valid values for azimuth are 0 -> 345 in 15 degree increments.
@@ -158,7 +158,7 @@ static int maxElevations[] = {
     45 //  345 
 };
 
-PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, double sampleRate)
+PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, float sampleRate)
 {
     bool isElevationGood = elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
     ASSERT(isElevationGood);
@@ -188,7 +188,7 @@ PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectN
 
         // Create the interpolated convolution kernels and delays.
         for (unsigned jj = 1; jj < InterpolationFactor; ++jj) {
-            double x = double(jj) / double(InterpolationFactor); // interpolate from 0 -> 1
+            float x = float(jj) / float(InterpolationFactor); // interpolate from 0 -> 1
 
             (*kernelListL)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListL->at(i).get(), kernelListL->at(j).get(), x);
             (*kernelListR)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListR->at(i).get(), kernelListR->at(j).get(), x);
@@ -199,7 +199,7 @@ PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectN
     return hrtfElevation.release();
 }
 
-PassOwnPtr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, double x, double sampleRate)
+PassOwnPtr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate)
 {
     ASSERT(hrtfElevation1 && hrtfElevation2);
     if (!hrtfElevation1 || !hrtfElevation2)
index 24b7822..ccff097 100644 (file)
@@ -50,10 +50,10 @@ public:
     // Normally, there will only be a single HRTF database set, but this API supports the possibility of multiple ones with different names.
     // Interpolated azimuths will be generated based on InterpolationFactor.
     // Valid values for elevation are -45 -> +90 in 15 degree increments.
-    static PassOwnPtr<HRTFElevation> createForSubject(const String& subjectName, int elevation, double sampleRate);
+    static PassOwnPtr<HRTFElevation> createForSubject(const String& subjectName, int elevation, float sampleRate);
 
     // Given two HRTFElevations, and an interpolation factor x: 0 -> 1, returns an interpolated HRTFElevation.
-    static PassOwnPtr<HRTFElevation> createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, double x, double sampleRate);
+    static PassOwnPtr<HRTFElevation> createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate);
 
     // Returns the list of left or right ear HRTFKernels for all the azimuths going from 0 to 360 degrees.
     HRTFKernelList* kernelListL() { return m_kernelListL.get(); }
@@ -61,7 +61,7 @@ public:
 
     double elevationAngle() const { return m_elevationAngle; }
     unsigned numberOfAzimuths() { return NumberOfTotalAzimuths; }
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
     
     // Returns the left and right kernels for the given azimuth index.
     // The interpolated delays based on azimuthBlend: 0 -> 1 are returned in frameDelayL and frameDelayR.
@@ -83,16 +83,16 @@ public:
     // Valid values for azimuth are 0 -> 345 in 15 degree increments.
     // Valid values for elevation are -45 -> +90 in 15 degree increments.
     // Returns true on success.
-    static bool calculateKernelsForAzimuthElevation(int azimuth, int elevation, double sampleRate, const String& subjectName,
+    static bool calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
                                                     RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR);
 
     // Given a specific azimuth and elevation angle, returns the left and right HRTFKernel in kernelL and kernelR.
     // This method averages the measured response using symmetry of azimuth (for example by averaging the -30.0 and +30.0 azimuth responses).
     // Returns true on success.
-    static bool calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, double sampleRate, const String& subjectName,
+    static bool calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
                                                              RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR);
 private:
-    HRTFElevation(PassOwnPtr<HRTFKernelList> kernelListL, PassOwnPtr<HRTFKernelList> kernelListR, int elevation, double sampleRate)
+    HRTFElevation(PassOwnPtr<HRTFKernelList> kernelListL, PassOwnPtr<HRTFKernelList> kernelListR, int elevation, float sampleRate)
         : m_kernelListL(kernelListL)
         , m_kernelListR(kernelListR)
         , m_elevationAngle(elevation)
@@ -103,7 +103,7 @@ private:
     OwnPtr<HRTFKernelList> m_kernelListL;
     OwnPtr<HRTFKernelList> m_kernelListR;
     double m_elevationAngle;
-    double m_sampleRate;
+    float m_sampleRate;
 };
 
 } // namespace WebCore
index 9db35ba..68eccb9 100644 (file)
@@ -35,6 +35,7 @@
 #include "AudioChannel.h"
 #include "Biquad.h"
 #include "FFTFrame.h"
+#include "FloatConversion.h"
 #include <wtf/MathExtras.h>
 
 using namespace std;
@@ -45,7 +46,7 @@ namespace WebCore {
 // This represents the initial delay before the most energetic part of the impulse response.
 // The sample-frame delay is removed from the impulseP impulse response, and this value  is returned.
 // the length of the passed in AudioChannel must be a power of 2.
-static double extractAverageGroupDelay(AudioChannel* channel, size_t analysisFFTSize)
+static float extractAverageGroupDelay(AudioChannel* channel, size_t analysisFFTSize)
 {
     ASSERT(channel);
         
@@ -59,14 +60,14 @@ static double extractAverageGroupDelay(AudioChannel* channel, size_t analysisFFT
     FFTFrame estimationFrame(analysisFFTSize);
     estimationFrame.doFFT(impulseP);
 
-    double frameDelay = estimationFrame.extractAverageGroupDelay();
+    float frameDelay = narrowPrecisionToFloat(estimationFrame.extractAverageGroupDelay());
     estimationFrame.doInverseFFT(impulseP);
 
     return frameDelay;
 }
 
-HRTFKernel::HRTFKernel(AudioChannel* channel, size_t fftSize, double sampleRate, bool bassBoost)
-    : m_frameDelay(0.0)
+HRTFKernel::HRTFKernel(AudioChannel* channel, size_t fftSize, float sampleRate, bool bassBoost)
+    : m_frameDelay(0)
     , m_sampleRate(sampleRate)
 {
     ASSERT(channel);
@@ -116,22 +117,22 @@ PassOwnPtr<AudioChannel> HRTFKernel::createImpulseResponse()
 }
 
 // Interpolates two kernels with x: 0 -> 1 and returns the result.
-PassRefPtr<HRTFKernel> HRTFKernel::createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, double x)
+PassRefPtr<HRTFKernel> HRTFKernel::createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, float x)
 {
     ASSERT(kernel1 && kernel2);
     if (!kernel1 || !kernel2)
         return 0;
  
     ASSERT(x >= 0.0 && x < 1.0);
-    x = min(1.0, max(0.0, x));
+    x = min(1.0f, max(0.0f, x));
     
-    double sampleRate1 = kernel1->sampleRate();
-    double sampleRate2 = kernel2->sampleRate();
+    float sampleRate1 = kernel1->sampleRate();
+    float sampleRate2 = kernel2->sampleRate();
     ASSERT(sampleRate1 == sampleRate2);
     if (sampleRate1 != sampleRate2)
         return 0;
     
-    double frameDelay = (1.0 - x) * kernel1->frameDelay() + x * kernel2->frameDelay();
+    float frameDelay = (1 - x) * kernel1->frameDelay() + x * kernel2->frameDelay();
     
     OwnPtr<FFTFrame> interpolatedFrame = FFTFrame::createInterpolatedFrame(*kernel1->fftFrame(), *kernel2->fftFrame(), x);
     return HRTFKernel::create(interpolatedFrame.release(), frameDelay, sampleRate1);
index 572a085..3a8dbfd 100644 (file)
@@ -51,25 +51,25 @@ class HRTFKernel : public RefCounted<HRTFKernel> {
 public:
     // Note: this is destructive on the passed in AudioChannel.
     // The length of channel must be a power of two.
-    static PassRefPtr<HRTFKernel> create(AudioChannel* channel, size_t fftSize, double sampleRate, bool bassBoost)
+    static PassRefPtr<HRTFKernel> create(AudioChannel* channel, size_t fftSize, float sampleRate, bool bassBoost)
     {
         return adoptRef(new HRTFKernel(channel, fftSize, sampleRate, bassBoost));
     }
 
-    static PassRefPtr<HRTFKernel> create(PassOwnPtr<FFTFrame> fftFrame, double frameDelay, double sampleRate)
+    static PassRefPtr<HRTFKernel> create(PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
     {
         return adoptRef(new HRTFKernel(fftFrame, frameDelay, sampleRate));
     }
 
     // Given two HRTFKernels, and an interpolation factor x: 0 -> 1, returns an interpolated HRTFKernel.
-    static PassRefPtr<HRTFKernel> createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, double x);
+    static PassRefPtr<HRTFKernel> createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, float x);
   
     FFTFrame* fftFrame() { return m_fftFrame.get(); }
     
     size_t fftSize() const { return m_fftFrame->fftSize(); }
-    double frameDelay() const { return m_frameDelay; }
+    float frameDelay() const { return m_frameDelay; }
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
     double nyquist() const { return 0.5 * sampleRate(); }
 
     // Converts back into impulse-response form.
@@ -77,9 +77,9 @@ public:
 
 private:
     // Note: this is destructive on the passed in AudioChannel.
-    HRTFKernel(AudioChannel* channel, size_t fftSize, double sampleRate, bool bassBoost);
+    HRTFKernel(AudioChannel*, size_t fftSize, float sampleRate, bool bassBoost);
     
-    HRTFKernel(PassOwnPtr<FFTFrame> fftFrame, double frameDelay, double sampleRate)
+    HRTFKernel(PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
         : m_fftFrame(fftFrame)
         , m_frameDelay(frameDelay)
         , m_sampleRate(sampleRate)
@@ -87,8 +87,8 @@ private:
     }
     
     OwnPtr<FFTFrame> m_fftFrame;
-    double m_frameDelay;
-    double m_sampleRate;
+    float m_frameDelay;
+    float m_sampleRate;
 };
 
 typedef Vector<RefPtr<HRTFKernel> > HRTFKernelList;
index 68bc505..5377eb2 100644 (file)
@@ -44,7 +44,7 @@ namespace WebCore {
 // We ASSERT the delay values used in process() with this value.
 const double MaxDelayTimeSeconds = 0.002;
 
-HRTFPanner::HRTFPanner(double sampleRate)
+HRTFPanner::HRTFPanner(float sampleRate)
     : Panner(PanningModelHRTF)
     , m_sampleRate(sampleRate)
     , m_isFirstRender(true)
@@ -60,7 +60,7 @@ HRTFPanner::~HRTFPanner()
 {
 }
 
-size_t HRTFPanner::fftSizeForSampleRate(double sampleRate)
+size_t HRTFPanner::fftSizeForSampleRate(float sampleRate)
 {
     // The HRTF impulse responses (loaded as audio resources) are 512 sample-frames @44.1KHz.
     // Currently, we truncate the impulse responses to half this size, but an FFT-size of twice impulse response size is needed (for convolution).
index 6c13d48..e771ba2 100644 (file)
@@ -33,7 +33,7 @@ namespace WebCore {
 
 class HRTFPanner : public Panner {
 public:
-    explicit HRTFPanner(double sampleRate);
+    explicit HRTFPanner(float sampleRate);
     virtual ~HRTFPanner();
 
     // Panner
@@ -41,16 +41,16 @@ public:
     virtual void reset();
 
     size_t fftSize() { return fftSizeForSampleRate(m_sampleRate); }
-    static size_t fftSizeForSampleRate(double sampleRate);
+    static size_t fftSizeForSampleRate(float sampleRate);
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
     
 private:
     // Given an azimuth angle in the range -180 -> +180, returns the corresponding azimuth index for the database,
     // and azimuthBlend which is an interpolation value from 0 -> 1.
     int calculateDesiredAzimuthIndexAndBlend(double azimuth, double& azimuthBlend);
 
-    double m_sampleRate;
+    float m_sampleRate;
     
     // m_isFirstRender and m_azimuthIndex are used to avoid harshly changing from rendering at one azimuth angle to another angle very far away.
     // Changing the azimuth gradually produces a smoother sound.
index d3d1a2a..debc9e3 100644 (file)
@@ -38,7 +38,7 @@
 
 namespace WebCore {
 
-PassOwnPtr<Panner> Panner::create(PanningModel model, double sampleRate)
+PassOwnPtr<Panner> Panner::create(PanningModel model, float sampleRate)
 {
     OwnPtr<Panner> panner;
 
index b57ceda..4b72832 100644 (file)
@@ -47,7 +47,7 @@ public:
     
     typedef unsigned PanningModel;
 
-    static PassOwnPtr<Panner> create(PanningModel model, double sampleRate);
+    static PassOwnPtr<Panner> create(PanningModel, float sampleRate);
 
     virtual ~Panner() { };
 
index d5c5526..f2a0096 100644 (file)
@@ -34,7 +34,7 @@
 
 namespace WebCore {
 
-PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, double sampleRate)
+PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, float sampleRate)
 {
     // FIXME: the sampleRate parameter is ignored. It should be removed from the API.
     OwnPtr<AudioBus> audioBus = PlatformSupport::loadPlatformAudioResource(name, sampleRate);
@@ -48,7 +48,7 @@ PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, double sam
     return AudioBus::createBySampleRateConverting(audioBus.get(), false, sampleRate);
 }
 
-PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, double sampleRate)
+PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
 {
     // FIXME: the sampleRate parameter is ignored. It should be removed from the API.
     OwnPtr<AudioBus> audioBus = PlatformSupport::decodeAudioFileData(static_cast<const char*>(data), dataSize, sampleRate);
index 6e6e27a..42a718b 100644 (file)
@@ -41,7 +41,7 @@
 
 namespace WebCore {
 
-PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, double sampleRate)
+PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, float sampleRate)
 {
     // This method can be called from other than the main thread, so we need an auto-release pool.
     NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
index d4ecaba..8bf8cc4 100644 (file)
@@ -33,6 +33,7 @@
 #include "AudioDestinationMac.h"
 
 #include "AudioSourceProvider.h"
+#include "FloatConversion.h"
 #include <CoreAudio/AudioHardware.h>
 
 namespace WebCore {
@@ -40,12 +41,12 @@ namespace WebCore {
 const int kBufferSize = 128;
 
 // Factory method: Mac-implementation
-PassOwnPtr<AudioDestination> AudioDestination::create(AudioSourceProvider& provider, double sampleRate)
+PassOwnPtr<AudioDestination> AudioDestination::create(AudioSourceProvider& provider, float sampleRate)
 {
     return adoptPtr(new AudioDestinationMac(provider, sampleRate));
 }
 
-double AudioDestination::hardwareSampleRate()
+float AudioDestination::hardwareSampleRate()
 {
     // Determine the default output device's sample-rate.
     AudioDeviceID deviceID = kAudioDeviceUnknown;
@@ -54,7 +55,7 @@ double AudioDestination::hardwareSampleRate()
     AudioObjectPropertyAddress defaultOutputDeviceAddress = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
     OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &defaultOutputDeviceAddress, 0, 0, &infoSize, (void*)&deviceID);
     if (result)
-        return 0.0; // error
+        return 0; // error
 
     Float64 nominalSampleRate;
     infoSize = sizeof(Float64);
@@ -62,12 +63,12 @@ double AudioDestination::hardwareSampleRate()
     AudioObjectPropertyAddress nominalSampleRateAddress = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
     result = AudioObjectGetPropertyData(deviceID, &nominalSampleRateAddress, 0, 0, &infoSize, (void*)&nominalSampleRate);
     if (result)
-        return 0.0; // error
+        return 0; // error
 
-    return nominalSampleRate;
+    return narrowPrecisionToFloat(nominalSampleRate);
 }
 
-AudioDestinationMac::AudioDestinationMac(AudioSourceProvider& provider, double sampleRate)
+AudioDestinationMac::AudioDestinationMac(AudioSourceProvider& provider, float sampleRate)
     : m_outputUnit(0)
     , m_provider(provider)
     , m_renderBus(2, kBufferSize, false)
index 197440c..963f7c7 100644 (file)
@@ -39,14 +39,14 @@ namespace WebCore {
 
 class AudioDestinationMac : public AudioDestination {
 public:
-    AudioDestinationMac(AudioSourceProvider&, double sampleRate);
+    AudioDestinationMac(AudioSourceProvider&, float sampleRate);
     virtual ~AudioDestinationMac();
 
     virtual void start();
     virtual void stop();
     bool isPlaying() { return m_isPlaying; }
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
 
 private:
     void configure();
@@ -60,7 +60,7 @@ private:
     AudioSourceProvider& m_provider;
     AudioBus m_renderBus;
 
-    double m_sampleRate;
+    float m_sampleRate;
     bool m_isPlaying;
 };
 
index 426995e..7b666a5 100644 (file)
@@ -34,6 +34,7 @@
 
 #include "AudioBus.h"
 #include "AudioFileReader.h"
+#include "FloatConversion.h"
 #include <CoreFoundation/CoreFoundation.h>
 #include <CoreServices/CoreServices.h>
 
@@ -135,7 +136,7 @@ SInt64 AudioFileReader::getSizeProc(void* clientData)
     return audioFileReader->dataSize();
 }
 
-PassOwnPtr<AudioBus> AudioFileReader::createBus(double sampleRate, bool mixToMono)
+PassOwnPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono)
 {
     if (!m_extAudioFileRef)
         return nullptr;
@@ -187,7 +188,7 @@ PassOwnPtr<AudioBus> AudioFileReader::createBus(double sampleRate, bool mixToMon
 
     // Create AudioBus where we'll put the PCM audio data
     OwnPtr<AudioBus> audioBus = adoptPtr(new AudioBus(busChannelCount, numberOfFrames));
-    audioBus->setSampleRate(m_clientDataFormat.mSampleRate); // save for later
+    audioBus->setSampleRate(narrowPrecisionToFloat(m_clientDataFormat.mSampleRate)); // save for later
 
     // Only allocated in the mixToMono case
     AudioFloatArray bufL;
@@ -241,13 +242,13 @@ PassOwnPtr<AudioBus> AudioFileReader::createBus(double sampleRate, bool mixToMon
     return audioBus.release();
 }
 
-PassOwnPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, double sampleRate)
+PassOwnPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, float sampleRate)
 {
     AudioFileReader reader(filePath);
     return reader.createBus(sampleRate, mixToMono);
 }
 
-PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, double sampleRate)
+PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
 {
     AudioFileReader reader(data, dataSize);
     return reader.createBus(sampleRate, mixToMono);
index d531266..2123431 100644 (file)
@@ -46,7 +46,7 @@ public:
     ~AudioFileReader();
 
     // Returns 0 if error
-    PassOwnPtr<AudioBus> createBus(double sampleRate, bool mixToMono);
+    PassOwnPtr<AudioBus> createBus(float sampleRate, bool mixToMono);
 
     const void* data() const { return m_data; }
     size_t dataSize() const { return m_dataSize; }
index a9a8902..cd49ae0 100644 (file)
@@ -54,7 +54,7 @@ AsyncAudioDecoder::~AsyncAudioDecoder()
     m_threadID = 0;
 }
 
-void AsyncAudioDecoder::decodeAsync(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
+void AsyncAudioDecoder::decodeAsync(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
 {
     ASSERT(isMainThread());
     ASSERT(audioData);
@@ -91,12 +91,12 @@ void AsyncAudioDecoder::runLoop()
     }
 }
 
-PassOwnPtr<AsyncAudioDecoder::DecodingTask> AsyncAudioDecoder::DecodingTask::create(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
+PassOwnPtr<AsyncAudioDecoder::DecodingTask> AsyncAudioDecoder::DecodingTask::create(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
 {
     return adoptPtr(new DecodingTask(audioData, sampleRate, successCallback, errorCallback));
 }
 
-AsyncAudioDecoder::DecodingTask::DecodingTask(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
+AsyncAudioDecoder::DecodingTask::DecodingTask(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
     : m_audioData(audioData)
     , m_sampleRate(sampleRate)
     , m_successCallback(successCallback)
index f2cca8d..f09542d 100644 (file)
@@ -47,21 +47,21 @@ public:
     ~AsyncAudioDecoder();
 
     // Must be called on the main thread.
-    void decodeAsync(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
+    void decodeAsync(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
 
 private:
     class DecodingTask {
         WTF_MAKE_NONCOPYABLE(DecodingTask);
     public:
-        static PassOwnPtr<DecodingTask> create(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
+        static PassOwnPtr<DecodingTask> create(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
 
         void decode();
         
     private:
-        DecodingTask(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
+        DecodingTask(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
 
         ArrayBuffer* audioData() { return m_audioData.get(); }
-        double sampleRate() const { return m_sampleRate; }
+        float sampleRate() const { return m_sampleRate; }
         AudioBufferCallback* successCallback() { return m_successCallback.get(); }
         AudioBufferCallback* errorCallback() { return m_errorCallback.get(); }
         AudioBuffer* audioBuffer() { return m_audioBuffer.get(); }
@@ -70,7 +70,7 @@ private:
         void notifyComplete();
 
         RefPtr<ArrayBuffer> m_audioData;
-        double m_sampleRate;
+        float m_sampleRate;
         RefPtr<AudioBufferCallback> m_successCallback;
         RefPtr<AudioBufferCallback> m_errorCallback;
         RefPtr<AudioBuffer> m_audioBuffer;
index 828062e..90142f2 100644 (file)
@@ -36,7 +36,7 @@
 
 namespace WebCore {
 
-AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext* context, double sampleRate)
+AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     addInput(adoptPtr(new AudioNodeInput(this)));
index 38bfd3b..5a555da 100644 (file)
@@ -39,7 +39,7 @@ class AudioProcessor;
 // AudioBasicProcessorNode is an AudioNode with one input and one output where the input and output have the same number of channels.
 class AudioBasicProcessorNode : public AudioNode {
 public:
-    AudioBasicProcessorNode(AudioContext*, double sampleRate);
+    AudioBasicProcessorNode(AudioContext*, float sampleRate);
 
     // AudioNode
     virtual void process(size_t framesToProcess);
index ff4f042..6e24d3b 100644 (file)
 
 namespace WebCore {
 
-PassRefPtr<AudioBuffer> AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
+PassRefPtr<AudioBuffer> AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
 {
     return adoptRef(new AudioBuffer(numberOfChannels, numberOfFrames, sampleRate));
 }
 
-PassRefPtr<AudioBuffer> AudioBuffer::createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, double sampleRate)
+PassRefPtr<AudioBuffer> AudioBuffer::createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
 {
     OwnPtr<AudioBus> bus = createBusFromInMemoryAudioFile(data, dataSize, mixToMono, sampleRate);
     if (bus.get())
@@ -53,7 +53,7 @@ PassRefPtr<AudioBuffer> AudioBuffer::createFromAudioFileData(const void* data, s
     return 0;
 }
 
-AudioBuffer::AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
+AudioBuffer::AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
     : m_gain(1.0)
     , m_sampleRate(sampleRate)
     , m_length(numberOfFrames)
index b11a20e..d5e9f2b 100644 (file)
@@ -41,15 +41,15 @@ class AudioBus;
     
 class AudioBuffer : public RefCounted<AudioBuffer> {
 public:   
-    static PassRefPtr<AudioBuffer> create(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
+    static PassRefPtr<AudioBuffer> create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
 
     // Returns 0 if data is not a valid audio file.
-    static PassRefPtr<AudioBuffer> createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, double sampleRate);
+    static PassRefPtr<AudioBuffer> createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate);
 
     // Format
     size_t length() const { return m_length; }
     double duration() const { return length() / sampleRate(); }
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
 
     // Channel data access
     unsigned numberOfChannels() const { return m_channels.size(); }
@@ -66,11 +66,11 @@ public:
     void releaseMemory();
     
 protected:
-    AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
+    AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
     AudioBuffer(AudioBus* bus);
 
     double m_gain; // scalar gain
-    double m_sampleRate;
+    float m_sampleRate;
     size_t m_length;
 
     Vector<RefPtr<Float32Array> > m_channels;
index 4a230e0..b4b863f 100644 (file)
@@ -31,6 +31,7 @@
 #include "AudioContext.h"
 #include "AudioNodeOutput.h"
 #include "Document.h"
+#include "FloatConversion.h"
 #include "ScriptCallStack.h"
 #include <algorithm>
 #include <wtf/MainThread.h>
@@ -48,12 +49,12 @@ const double UnknownTime = -1;
 // to minimize linear interpolation aliasing.
 const double MaxRate = 1024;
 
-PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, double sampleRate)
+PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, float sampleRate)
 {
     return adoptRef(new AudioBufferSourceNode(context, sampleRate));
 }
 
-AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, double sampleRate)
+AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, float sampleRate)
     : AudioSourceNode(context, sampleRate)
     , m_buffer(0)
     , m_isPlaying(false)
@@ -100,7 +101,7 @@ void AudioBufferSourceNode::process(size_t framesToProcess)
     // Careful - this is a tryLock() and not an autolocker, so we must unlock() before every return.
     if (m_processLock.tryLock()) {
         // Check if it's time to start playing.
-        double sampleRate = this->sampleRate();
+        float sampleRate = this->sampleRate();
         double quantumStartTime = context()->currentTime();
         double quantumEndTime = quantumStartTime + framesToProcess / sampleRate;
 
@@ -133,8 +134,8 @@ void AudioBufferSourceNode::process(size_t framesToProcess)
         // If the end time is somewhere in the middle of this time quantum, then simply zero out the
         // frames starting at the end time.
         if (m_endTime != UnknownTime && m_endTime >= quantumStartTime && m_endTime < quantumEndTime) {
-            unsigned zeroStartFrame = (m_endTime - quantumStartTime) * sampleRate;
-            unsigned framesToZero = framesToProcess - zeroStartFrame;
+            size_t zeroStartFrame = narrowPrecisionToFloat((m_endTime - quantumStartTime) * sampleRate);
+            size_t framesToZero = framesToProcess - zeroStartFrame;
 
             bool isSafe = zeroStartFrame < framesToProcess && framesToZero <= framesToProcess && zeroStartFrame + framesToZero <= framesToProcess;
             ASSERT(isSafe);
@@ -272,13 +273,13 @@ void AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
         double sampleL1 = sourceL[readIndex];
         double sampleL2 = sourceL[readIndex2];
         double sampleL = (1.0 - interpolationFactor) * sampleL1 + interpolationFactor * sampleL2;
-        *destinationL++ = sampleL;
+        *destinationL++ = narrowPrecisionToFloat(sampleL);
 
         if (isStereo) {
             double sampleR1 = sourceR[readIndex];
             double sampleR2 = sourceR[readIndex2];
             double sampleR = (1.0 - interpolationFactor) * sampleR1 + interpolationFactor * sampleR2;
-            *destinationR++ = sampleR;
+            *destinationR++ = narrowPrecisionToFloat(sampleR);
         }
 
         virtualReadIndex += pitchRate;
index c769c97..fd73081 100644 (file)
@@ -43,7 +43,7 @@ class AudioContext;
 
 class AudioBufferSourceNode : public AudioSourceNode {
 public:
-    static PassRefPtr<AudioBufferSourceNode> create(AudioContext*, double sampleRate);
+    static PassRefPtr<AudioBufferSourceNode> create(AudioContext*, float sampleRate);
 
     virtual ~AudioBufferSourceNode();
     
@@ -83,7 +83,7 @@ public:
     void setPannerNode(PassRefPtr<AudioPannerNode> pannerNode) { m_pannerNode = pannerNode; }
 
 private:
-    AudioBufferSourceNode(AudioContext*, double sampleRate);
+    AudioBufferSourceNode(AudioContext*, float sampleRate);
 
     void renderFromBuffer(AudioBus*, unsigned destinationFrameOffset, size_t numberOfFrames);
 
index 0f8cc00..51e7855 100644 (file)
@@ -42,7 +42,7 @@ namespace WebCore {
 // It can easily be increased to support more if the web audio specification is updated.
 const unsigned NumberOfInputs = 6;
 
-AudioChannelMerger::AudioChannelMerger(AudioContext* context, double sampleRate)
+AudioChannelMerger::AudioChannelMerger(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     // Create a fixed number of inputs (able to handle the maximum number of channels we deal with).
index b7b718b..e773dae 100644 (file)
@@ -38,7 +38,7 @@ class AudioContext;
     
 class AudioChannelMerger : public AudioNode {
 public:
-    static PassRefPtr<AudioChannelMerger> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<AudioChannelMerger> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new AudioChannelMerger(context, sampleRate));      
     }
@@ -51,7 +51,7 @@ public:
     virtual void checkNumberOfChannelsForInput(AudioNodeInput*);
 
 private:
-    AudioChannelMerger(AudioContext*, double sampleRate);
+    AudioChannelMerger(AudioContext*, float sampleRate);
 };
 
 } // namespace WebCore
index 7e90b7a..e5cc72a 100644 (file)
@@ -37,7 +37,7 @@ namespace WebCore {
 // It can easily be increased to support more if the web audio specification is updated.
 const unsigned NumberOfOutputs = 6;
 
-AudioChannelSplitter::AudioChannelSplitter(AudioContext* context, double sampleRate)
+AudioChannelSplitter::AudioChannelSplitter(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     addInput(adoptPtr(new AudioNodeInput(this)));
index 7dadac5..71b0ef4 100644 (file)
@@ -34,7 +34,7 @@ class AudioContext;
     
 class AudioChannelSplitter : public AudioNode {
 public:
-    static PassRefPtr<AudioChannelSplitter> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<AudioChannelSplitter> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new AudioChannelSplitter(context, sampleRate));      
     }
@@ -44,7 +44,7 @@ public:
     virtual void reset();
 
 private:
-    AudioChannelSplitter(AudioContext*, double sampleRate);
+    AudioChannelSplitter(AudioContext*, float sampleRate);
 };
 
 } // namespace WebCore
index 39e8f3d..3efffec 100644 (file)
@@ -82,7 +82,7 @@ namespace WebCore {
     
 namespace {
     
-bool isSampleRateRangeGood(double sampleRate)
+bool isSampleRateRangeGood(float sampleRate)
 {
     return sampleRate >= 22050 && sampleRate <= 96000;
 }
@@ -103,7 +103,7 @@ PassRefPtr<AudioContext> AudioContext::create(Document* document)
     return adoptRef(new AudioContext(document));
 }
 
-PassRefPtr<AudioContext> AudioContext::createOfflineContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate, ExceptionCode& ec)
+PassRefPtr<AudioContext> AudioContext::createOfflineContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
 {
     ASSERT(document);
 
@@ -143,7 +143,7 @@ AudioContext::AudioContext(Document* document)
 }
 
 // Constructor for offline (non-realtime) rendering.
-AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
+AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
     : ActiveDOMObject(document, this)
     , m_isInitialized(false)
     , m_isAudioThreadFinished(false)
@@ -294,7 +294,7 @@ void AudioContext::refBuffer(PassRefPtr<AudioBuffer> buffer)
     m_allocatedBuffers.append(buffer);
 }
 
-PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
+PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
 {
     if (!isSampleRateRangeGood(sampleRate) || numberOfChannels > 10 || !numberOfFrames)
         return 0;
index 8f5c35b..fa73d70 100644 (file)
@@ -76,7 +76,7 @@ public:
     static PassRefPtr<AudioContext> create(Document*);
 
     // Create an AudioContext for offline (non-realtime) rendering.
-    static PassRefPtr<AudioContext> createOfflineContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate, ExceptionCode&);
+    static PassRefPtr<AudioContext> createOfflineContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
 
     virtual ~AudioContext();
 
@@ -95,9 +95,9 @@ public:
 
     AudioDestinationNode* destination() { return m_destinationNode.get(); }
     double currentTime() { return m_destinationNode->currentTime(); }
-    double sampleRate() { return m_destinationNode->sampleRate(); }
+    float sampleRate() { return m_destinationNode->sampleRate(); }
 
-    PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
+    PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
     PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono);
 
     // Asynchronous audio file data decoding.
@@ -226,7 +226,7 @@ public:
     
 private:
     AudioContext(Document*);
-    AudioContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
+    AudioContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
     void constructCommon();
 
     void lazyInitialize();
index a735ca1..ff9ebbd 100644 (file)
@@ -36,7 +36,7 @@
 
 namespace WebCore {
     
-AudioDestinationNode::AudioDestinationNode(AudioContext* context, double sampleRate)
+AudioDestinationNode::AudioDestinationNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
     , m_currentTime(0.0)
 {
index 5913205..d7bc7bc 100644 (file)
@@ -36,7 +36,7 @@ class AudioContext;
     
 class AudioDestinationNode : public AudioNode, public AudioSourceProvider {
 public:
-    AudioDestinationNode(AudioContext*, double sampleRate);
+    AudioDestinationNode(AudioContext*, float sampleRate);
     virtual ~AudioDestinationNode();
     
     // AudioNode   
@@ -48,7 +48,7 @@ public:
 
     double currentTime() { return m_currentTime; }
 
-    virtual double sampleRate() const = 0;
+    virtual float sampleRate() const = 0;
 
     virtual unsigned numberOfChannels() const { return 2; } // FIXME: update when multi-channel (more than stereo) is supported
 
index 3678792..2129c85 100644 (file)
@@ -34,7 +34,7 @@
 
 namespace WebCore {
 
-AudioGainNode::AudioGainNode(AudioContext* context, double sampleRate)
+AudioGainNode::AudioGainNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
     , m_lastGain(1.0)
     , m_sampleAccurateGainValues(AudioNode::ProcessingSizeInFrames) // FIXME: can probably share temp buffer in context
index a0d3c2b..e634212 100644 (file)
@@ -39,7 +39,7 @@ class AudioContext;
 
 class AudioGainNode : public AudioNode {
 public:
-    static PassRefPtr<AudioGainNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<AudioGainNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new AudioGainNode(context, sampleRate));      
     }
@@ -55,7 +55,7 @@ public:
     AudioGain* gain() { return m_gain.get(); }                                   
     
 private:
-    AudioGainNode(AudioContext*, double sampleRate);
+    AudioGainNode(AudioContext*, float sampleRate);
 
     double m_lastGain; // for de-zippering
     RefPtr<AudioGain> m_gain;
index 44fb02c..2f1ef76 100644 (file)
@@ -38,10 +38,10 @@ namespace WebCore {
 
 AudioListener::AudioListener()
     : m_position(0, 0, 0)
-    , m_orientation(0.0, 0.0, -1.0)
-    , m_upVector(0.0, 1.0, 0.0)
+    , m_orientation(0, 0, -1)
+    , m_upVector(0, 1, 0)
     , m_velocity(0, 0, 0)
-    , m_dopplerFactor(1.0)
+    , m_dopplerFactor(1)
     , m_speedOfSound(343.3)
 {
 }
index 5281a89..8b5d8ad 100644 (file)
@@ -45,12 +45,12 @@ public:
     }
 
     // Position
-    void setPosition(double x, double y, double z) { setPosition(FloatPoint3D(x, y, z)); }
+    void setPosition(float x, float y, float z) { setPosition(FloatPoint3D(x, y, z)); }
     void setPosition(const FloatPoint3D &position) { m_position = position; }
     const FloatPoint3D& position() const { return m_position; }
 
     // Orientation
-    void setOrientation(double x, double y, double z, double upX, double upY, double upZ)
+    void setOrientation(float x, float y, float z, float upX, float upY, float upZ)
     {
         setOrientation(FloatPoint3D(x, y, z));
         setUpVector(FloatPoint3D(upX, upY, upZ));
@@ -63,7 +63,7 @@ public:
     const FloatPoint3D& upVector() const { return m_upVector; }
 
     // Velocity
-    void setVelocity(double x, double y, double z) { setVelocity(FloatPoint3D(x, y, z)); }
+    void setVelocity(float x, float y, float z) { setVelocity(FloatPoint3D(x, y, z)); }
     void setVelocity(const FloatPoint3D &velocity) { m_velocity = velocity; }
     const FloatPoint3D& velocity() const { return m_velocity; }
 
index 0c17de6..c2bbaf7 100644 (file)
@@ -36,7 +36,7 @@
 
 namespace WebCore {
 
-AudioNode::AudioNode(AudioContext* context, double sampleRate)
+AudioNode::AudioNode(AudioContext* context, float sampleRate)
     : m_isInitialized(false)
     , m_nodeType(NodeTypeUnknown)
     , m_context(context)
index 00871dd..86ffe7c 100644 (file)
@@ -48,7 +48,7 @@ class AudioNode {
 public:
     enum { ProcessingSizeInFrames = 128 };
 
-    AudioNode(AudioContext*, double sampleRate);
+    AudioNode(AudioContext*, float sampleRate);
     virtual ~AudioNode();
 
     AudioContext* context() { return m_context.get(); }
@@ -117,7 +117,7 @@ public:
     bool connect(AudioNode* destination, unsigned outputIndex = 0, unsigned inputIndex = 0);
     bool disconnect(unsigned outputIndex = 0);
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
 
     // processIfNecessary() is called by our output(s) when the rendering graph needs this AudioNode to process.
     // This method ensures that the AudioNode will only process once per rendering time quantum even if it's called repeatedly.
@@ -150,7 +150,7 @@ private:
     volatile bool m_isInitialized;
     NodeType m_nodeType;
     RefPtr<AudioContext> m_context;
-    double m_sampleRate;
+    float m_sampleRate;
     Vector<OwnPtr<AudioNodeInput> > m_inputs;
     Vector<OwnPtr<AudioNodeOutput> > m_outputs;
 
index 7d26fea..faf7f15 100644 (file)
@@ -46,7 +46,7 @@ static void fixNANs(double &x)
         x = 0.0;
 }
 
-AudioPannerNode::AudioPannerNode(AudioContext* context, double sampleRate)
+AudioPannerNode::AudioPannerNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
     , m_panningModel(Panner::PanningModelHRTF)
     , m_lastGain(-1.0)
@@ -188,7 +188,7 @@ void AudioPannerNode::getAzimuthElevation(double* outAzimuth, double* outElevati
 
     FloatPoint3D up = listenerRight.cross(listenerFrontNorm);
 
-    double upProjection = sourceListener.dot(up);
+    float upProjection = sourceListener.dot(up);
 
     FloatPoint3D projectedSource = sourceListener - upProjection * up;
     projectedSource.normalize();
index 61e34a9..6ecebea 100644 (file)
@@ -53,7 +53,7 @@ public:
         SOUNDFIELD = 2,
     };
 
-    static PassRefPtr<AudioPannerNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<AudioPannerNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new AudioPannerNode(context, sampleRate));
     }
@@ -117,7 +117,7 @@ public:
     AudioGain* coneGain() { return m_coneGain.get(); }                                        
 
 private:
-    AudioPannerNode(AudioContext*, double sampleRate);
+    AudioPannerNode(AudioContext*, float sampleRate);
 
     // Returns the combined distance and cone gain attenuation.
     float distanceConeGain();
index 9056498..be9279f 100644 (file)
@@ -31,6 +31,7 @@
 
 #include "AudioNode.h"
 #include "AudioUtilities.h"
+#include "FloatConversion.h"
 #include <wtf/MathExtras.h>
 
 namespace WebCore {
@@ -43,13 +44,13 @@ float AudioParam::value()
     // Update value for timeline.
     if (context() && context()->isAudioThread()) {
         bool hasValue;
-        float timelineValue = m_timeline.valueForContextTime(context(), m_value, hasValue);
+        float timelineValue = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), hasValue);
 
         if (hasValue)
             m_value = timelineValue;
     }
 
-    return static_cast<float>(m_value);
+    return narrowPrecisionToFloat(m_value);
 }
 
 void AudioParam::setValue(float value)
@@ -62,7 +63,7 @@ void AudioParam::setValue(float value)
 
 float AudioParam::smoothedValue()
 {
-    return static_cast<float>(m_smoothedValue);
+    return narrowPrecisionToFloat(m_smoothedValue);
 }
 
 bool AudioParam::smooth()
@@ -71,7 +72,7 @@ bool AudioParam::smooth()
     // Smoothing effectively is performed by the timeline.
     bool useTimelineValue = false;
     if (context())
-        m_value = m_timeline.valueForContextTime(context(), m_value, useTimelineValue);
+        m_value = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), useTimelineValue);
     
     if (m_smoothedValue == m_value) {
         // Smoothed value has already approached and snapped to value.
@@ -102,12 +103,12 @@ void AudioParam::calculateSampleAccurateValues(float* values, unsigned numberOfV
     // Calculate values for this render quantum.
     // Normally numberOfValues will equal AudioNode::ProcessingSizeInFrames (the render quantum size).
     float sampleRate = context()->sampleRate();
-    float startTime = context()->currentTime();
+    float startTime = narrowPrecisionToFloat(context()->currentTime());
     float endTime = startTime + numberOfValues / sampleRate;
 
     // Note we're running control rate at the sample-rate.
     // Pass in the current value as default value.
-    m_value = m_timeline.valuesForTimeRange(startTime, endTime, m_value, values, numberOfValues, sampleRate, sampleRate);
+    m_value = m_timeline.valuesForTimeRange(startTime, endTime, narrowPrecisionToFloat(m_value), values, numberOfValues, sampleRate, sampleRate);
 }
 
 } // namespace WebCore
index 0451884..2ee134a 100644 (file)
@@ -30,6 +30,7 @@
 #include "AudioParamTimeline.h"
 
 #include "AudioUtilities.h"
+#include "FloatConversion.h"
 #include <algorithm>
 #include <wtf/MathExtras.h>
 
@@ -124,8 +125,8 @@ float AudioParamTimeline::valueForContextTime(AudioContext* context, float defau
     // Ask for just a single value.
     float value;
     float sampleRate = context->sampleRate();
-    float startTime = context->currentTime();
-    float endTime = startTime + 1.1 / sampleRate; // time just beyond one sample-frame
+    float startTime = narrowPrecisionToFloat(context->currentTime());
+    float endTime = startTime + 1.1f / sampleRate; // time just beyond one sample-frame
     float controlRate = sampleRate / AudioNode::ProcessingSizeInFrames; // one parameter change per render quantum
     value = valuesForTimeRange(startTime, endTime, defaultValue, &value, 1, sampleRate, controlRate);
 
@@ -157,7 +158,7 @@ float AudioParamTimeline::valuesForTimeRange(float startTime,
 }
 
 // Returns the rounded down integer sample-frame for the time and sample-rate.
-static unsigned timeToSampleFrame(double time, double sampleRate)
+static unsigned timeToSampleFrame(double time, float sampleRate)
 {
     double k = 0.5 / sampleRate;
     return static_cast<unsigned>((time + k) * sampleRate);
@@ -184,7 +185,7 @@ float AudioParamTimeline::valuesForTimeRangeImpl(float startTime,
     }
 
     // Maintain a running time and index for writing the values buffer.
-    double currentTime = startTime;
+    float currentTime = startTime;
     unsigned writeIndex = 0;
 
     // If first event is after startTime then fill initial part of values buffer with defaultValue
@@ -245,8 +246,8 @@ float AudioParamTimeline::valuesForTimeRangeImpl(float startTime,
                     values[writeIndex] = value;
             } else {
                 // Interpolate in log space.
-                value1 = log2(value1);
-                value2 = log2(value2);
+                value1 = log2f(value1);
+                value2 = log2f(value2);
 
                 // FIXME: optimize to not use pow() in inner loop, this is just a simple exponential ramp.
                 for (; writeIndex < fillToFrame; ++writeIndex) {
index 6091371..a6bdd42 100644 (file)
@@ -35,7 +35,7 @@ namespace WebCore {
 
 class AudioSourceNode : public AudioNode {
 public:
-    AudioSourceNode(AudioContext* context, double sampleRate)
+    AudioSourceNode(AudioContext* context, float sampleRate)
         : AudioNode(context, sampleRate)
     {
     }
index ced07b8..f16d8e5 100644 (file)
@@ -32,7 +32,7 @@
 
 namespace WebCore {
 
-BiquadFilterNode::BiquadFilterNode(AudioContext* context, double sampleRate)
+BiquadFilterNode::BiquadFilterNode(AudioContext* context, float sampleRate)
     : AudioBasicProcessorNode(context, sampleRate)
 {
     // Initially setup as lowpass filter.
index dfe17b3..683b2a3 100644 (file)
@@ -46,7 +46,7 @@ public:
         ALLPASS = 7
     };
 
-    static PassRefPtr<BiquadFilterNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<BiquadFilterNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new BiquadFilterNode(context, sampleRate));      
     }
@@ -59,7 +59,7 @@ public:
     AudioParam* gain() { return biquadProcessor()->parameter3(); }
     
 private:
-    BiquadFilterNode(AudioContext*, double sampleRate);
+    BiquadFilterNode(AudioContext*, float sampleRate);
 
     BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); }
 };
index 540e1fa..c755277 100644 (file)
@@ -32,7 +32,7 @@
 
 namespace WebCore {
     
-BiquadProcessor::BiquadProcessor(double sampleRate, size_t numberOfChannels, bool autoInitialize)
+BiquadProcessor::BiquadProcessor(float sampleRate, size_t numberOfChannels, bool autoInitialize)
     : AudioDSPKernelProcessor(sampleRate, numberOfChannels)
     , m_type(LowPass)
     , m_parameter1(0)
@@ -51,7 +51,7 @@ BiquadProcessor::BiquadProcessor(double sampleRate, size_t numberOfChannels, boo
         initialize();
 }
 
-BiquadProcessor::BiquadProcessor(FilterType type, double sampleRate, size_t numberOfChannels, bool autoInitialize)
+BiquadProcessor::BiquadProcessor(FilterType type, float sampleRate, size_t numberOfChannels, bool autoInitialize)
     : AudioDSPKernelProcessor(sampleRate, numberOfChannels)
     , m_type(type)
     , m_parameter1(0)
index 3c52185..e748726 100644 (file)
@@ -49,10 +49,10 @@ public:
         Allpass = 7
     };
 
-    BiquadProcessor(double sampleRate, size_t numberOfChannels, bool autoInitialize);
+    BiquadProcessor(float sampleRate, size_t numberOfChannels, bool autoInitialize);
 
     // Old constructor used by deprecated LowPass2FilterNode and HighPass2FilterNode
-    BiquadProcessor(FilterType, double sampleRate, size_t numberOfChannels, bool autoInitialize = true);
+    BiquadProcessor(FilterType, float sampleRate, size_t numberOfChannels, bool autoInitialize = true);
 
     virtual ~BiquadProcessor();
     
index 28d57db..3158356 100644 (file)
@@ -45,7 +45,7 @@ const size_t MaxFFTSize = 32768;
 
 namespace WebCore {
 
-ConvolverNode::ConvolverNode(AudioContext* context, double sampleRate)
+ConvolverNode::ConvolverNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     addInput(adoptPtr(new AudioNodeInput(this)));
index 7b71ba9..fb29d5c 100644 (file)
@@ -37,7 +37,7 @@ class Reverb;
     
 class ConvolverNode : public AudioNode {
 public:
-    static PassRefPtr<ConvolverNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<ConvolverNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new ConvolverNode(context, sampleRate));      
     }
@@ -55,7 +55,7 @@ public:
     AudioBuffer* buffer();
 
 private:
-    ConvolverNode(AudioContext*, double sampleRate);
+    ConvolverNode(AudioContext*, float sampleRate);
 
     OwnPtr<Reverb> m_reverb;
     RefPtr<AudioBuffer> m_buffer;
index f1a7263..f7db34e 100644 (file)
@@ -50,7 +50,7 @@ void DefaultAudioDestinationNode::initialize()
     if (isInitialized())
         return;
 
-    double hardwareSampleRate = AudioDestination::hardwareSampleRate();
+    float hardwareSampleRate = AudioDestination::hardwareSampleRate();
 #ifndef NDEBUG    
     fprintf(stderr, ">>>> hardwareSampleRate = %f\n", hardwareSampleRate);
 #endif
index f1d689d..630bdc3 100644 (file)
@@ -46,7 +46,7 @@ public:
     virtual void initialize();
     virtual void uninitialize();
     
-    double sampleRate() const { return m_destination->sampleRate(); }
+    float sampleRate() const { return m_destination->sampleRate(); }
 
     virtual void startRendering();
     
index 8e0b40e..aeda189 100644 (file)
@@ -33,8 +33,8 @@
 
 using namespace std;
   
-const double DefaultMaxDelayTime = 1.0;
-const double SmoothingTimeConstant = 0.020; // 20ms
+const float DefaultMaxDelayTime = 1;
+const float SmoothingTimeConstant = 0.020f; // 20ms
   
 namespace WebCore {
 
@@ -54,7 +54,7 @@ DelayDSPKernel::DelayDSPKernel(DelayProcessor* processor)
     m_smoothingRate = AudioUtilities::discreteTimeConstantForSampleRate(SmoothingTimeConstant, processor->sampleRate());
 }
 
-DelayDSPKernel::DelayDSPKernel(double maxDelayTime, double sampleRate)
+DelayDSPKernel::DelayDSPKernel(double maxDelayTime, float sampleRate)
     : AudioDSPKernel(sampleRate)
     , m_maxDelayTime(maxDelayTime)
     , m_writeIndex(0)
@@ -88,7 +88,7 @@ void DelayDSPKernel::process(const float* source, float* destination, size_t fra
     if (!source || !destination)
         return;
         
-    double sampleRate = this->sampleRate();
+    float sampleRate = this->sampleRate();
     double delayTime = delayProcessor() ? delayProcessor()->delayTime()->value() : m_desiredDelayFrames / sampleRate;
 
     // Make sure the delay time is in a valid range.
index 2ae36cb..79a3956 100644 (file)
@@ -36,7 +36,7 @@ class DelayProcessor;
 class DelayDSPKernel : public AudioDSPKernel {
 public:  
     DelayDSPKernel(DelayProcessor*);
-    DelayDSPKernel(double maxDelayTime, double sampleRate);
+    DelayDSPKernel(double maxDelayTime, float sampleRate);
     
     virtual void process(const float* source, float* destination, size_t framesToProcess);
     virtual void reset();
index b578d60..2a525dd 100644 (file)
@@ -30,7 +30,7 @@
 
 namespace WebCore {
 
-DelayNode::DelayNode(AudioContext* context, double sampleRate)
+DelayNode::DelayNode(AudioContext* context, float sampleRate)
     : AudioBasicProcessorNode(context, sampleRate)
 {
     m_processor = adoptPtr(new DelayProcessor(sampleRate, 1));    
index 93ad227..3d58a89 100644 (file)
@@ -35,7 +35,7 @@ class AudioParam;
 
 class DelayNode : public AudioBasicProcessorNode {
 public:
-    static PassRefPtr<DelayNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<DelayNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new DelayNode(context, sampleRate));      
     }
@@ -43,7 +43,7 @@ public:
     AudioParam* delayTime();
 
 private:
-    DelayNode(AudioContext*, double sampleRate);
+    DelayNode(AudioContext*, float sampleRate);
 
     DelayProcessor* delayProcessor() { return static_cast<DelayProcessor*>(processor()); }
 };
index 5fdc8df..8ed3e43 100644 (file)
@@ -32,7 +32,7 @@
 
 namespace WebCore {
 
-DelayProcessor::DelayProcessor(double sampleRate, unsigned numberOfChannels)
+DelayProcessor::DelayProcessor(float sampleRate, unsigned numberOfChannels)
     : AudioDSPKernelProcessor(sampleRate, numberOfChannels)
 {
     m_delayTime = AudioParam::create("delayTime", 0.0, 0.0, 1.0);
index 4844c4b..15428ce 100644 (file)
@@ -37,7 +37,7 @@ class AudioDSPKernel;
     
 class DelayProcessor : public AudioDSPKernelProcessor {
 public:
-    DelayProcessor(double sampleRate, unsigned numberOfChannels);
+    DelayProcessor(float sampleRate, unsigned numberOfChannels);
     virtual ~DelayProcessor();
     
     virtual PassOwnPtr<AudioDSPKernel> createKernel();
index 6feb296..ea8b589 100644 (file)
@@ -35,7 +35,7 @@
 
 namespace WebCore {
 
-DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* context, double sampleRate)
+DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     addInput(adoptPtr(new AudioNodeInput(this)));
index 7ae75ea..d718ab3 100644 (file)
@@ -34,7 +34,7 @@ class DynamicsCompressor;
     
 class DynamicsCompressorNode : public AudioNode {
 public:
-    static PassRefPtr<DynamicsCompressorNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<DynamicsCompressorNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new DynamicsCompressorNode(context, sampleRate));      
     }
@@ -48,7 +48,7 @@ public:
     virtual void uninitialize();
 
 private:
-    DynamicsCompressorNode(AudioContext*, double sampleRate);
+    DynamicsCompressorNode(AudioContext*, float sampleRate);
 
     OwnPtr<DynamicsCompressor> m_dynamicsCompressor;
 };
index 27946dd..520785f 100644 (file)
@@ -30,7 +30,7 @@
 
 namespace WebCore {
 
-HighPass2FilterNode::HighPass2FilterNode(AudioContext* context, double sampleRate)
+HighPass2FilterNode::HighPass2FilterNode(AudioContext* context, float sampleRate)
     : AudioBasicProcessorNode(context, sampleRate)
 {
     m_processor = adoptPtr(new BiquadProcessor(BiquadProcessor::HighPass, sampleRate, 1, false));
index be0beb6..bfd76a0 100644 (file)
@@ -34,7 +34,7 @@ class AudioParam;
     
 class HighPass2FilterNode : public AudioBasicProcessorNode {
 public:
-    static PassRefPtr<HighPass2FilterNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<HighPass2FilterNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new HighPass2FilterNode(context, sampleRate));      
     }
@@ -43,7 +43,7 @@ public:
     AudioParam* resonance() { return biquadProcessor()->parameter2(); }
     
 private:
-    HighPass2FilterNode(AudioContext*, double sampleRate);
+    HighPass2FilterNode(AudioContext*, float sampleRate);
 
     BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); }
 };
index 487299b..05e21b7 100644 (file)
@@ -42,12 +42,12 @@ namespace WebCore {
 
 const size_t DefaultBufferSize = 4096;
 
-PassRefPtr<JavaScriptAudioNode> JavaScriptAudioNode::create(AudioContext* context, double sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
+PassRefPtr<JavaScriptAudioNode> JavaScriptAudioNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
 {
     return adoptRef(new JavaScriptAudioNode(context, sampleRate, bufferSize, numberOfInputs, numberOfOutputs));
 }
 
-JavaScriptAudioNode::JavaScriptAudioNode(AudioContext* context, double sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
+JavaScriptAudioNode::JavaScriptAudioNode(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
     : AudioNode(context, sampleRate)
     , m_doubleBufferIndex(0)
     , m_doubleBufferIndexForEvent(0)
@@ -96,7 +96,7 @@ void JavaScriptAudioNode::initialize()
     if (isInitialized())
         return;
 
-    double sampleRate = context()->sampleRate();
+    float sampleRate = context()->sampleRate();
 
     // Create double buffers on both the input and output sides.
     // These AudioBuffers will be directly accessed in the main thread by JavaScript.
index e99a25d..5a299c4 100644 (file)
@@ -52,7 +52,7 @@ public:
     // This value controls how frequently the onaudioprocess event handler is called and how many sample-frames need to be processed each call.
     // Lower numbers for bufferSize will result in a lower (better) latency. Higher numbers will be necessary to avoid audio breakup and glitches.
     // The value chosen must carefully balance between latency and audio quality.
-    static PassRefPtr<JavaScriptAudioNode> create(AudioContext*, double sampleRate, size_t bufferSize, unsigned numberOfInputs = 1, unsigned numberOfOutputs = 1);
+    static PassRefPtr<JavaScriptAudioNode> create(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputs = 1, unsigned numberOfOutputs = 1);
 
     virtual ~JavaScriptAudioNode();
 
@@ -77,7 +77,7 @@ public:
     using AudioNode::deref;
     
 private:
-    JavaScriptAudioNode(AudioContext*, double sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs);
+    JavaScriptAudioNode(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs);
 
     static void fireProcessEventDispatch(void* userData);
     void fireProcessEvent();
index ac2de1f..e2d669a 100644 (file)
@@ -30,7 +30,7 @@
 
 namespace WebCore {
 
-LowPass2FilterNode::LowPass2FilterNode(AudioContext* context, double sampleRate)
+LowPass2FilterNode::LowPass2FilterNode(AudioContext* context, float sampleRate)
     : AudioBasicProcessorNode(context, sampleRate)
 {
     m_processor = adoptPtr(new BiquadProcessor(BiquadProcessor::LowPass, sampleRate, 1, false));
index 43d7051..3342c6f 100644 (file)
@@ -34,7 +34,7 @@ class AudioParam;
     
 class LowPass2FilterNode : public AudioBasicProcessorNode {
 public:
-    static PassRefPtr<LowPass2FilterNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<LowPass2FilterNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new LowPass2FilterNode(context, sampleRate));      
     }
@@ -43,7 +43,7 @@ public:
     AudioParam* resonance() { return biquadProcessor()->parameter2(); }
     
 private:
-    LowPass2FilterNode(AudioContext*, double sampleRate);
+    LowPass2FilterNode(AudioContext*, float sampleRate);
 
     BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); }
 };
index 714c120..c4d567a 100644 (file)
@@ -49,7 +49,7 @@ public:
     virtual void initialize();
     virtual void uninitialize();
     
-    double sampleRate() const { return m_renderTarget->sampleRate(); }
+    float sampleRate() const { return m_renderTarget->sampleRate(); }
 
     void startRendering();
     
index 449c84a..8d74a67 100644 (file)
@@ -33,7 +33,7 @@
 
 namespace WebCore {
 
-RealtimeAnalyserNode::RealtimeAnalyserNode(AudioContext* context, double sampleRate)
+RealtimeAnalyserNode::RealtimeAnalyserNode(AudioContext* context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     addInput(adoptPtr(new AudioNodeInput(this)));
index d00a5b6..6c7add9 100644 (file)
@@ -32,7 +32,7 @@ namespace WebCore {
 
 class RealtimeAnalyserNode : public AudioNode {
 public:
-    static PassRefPtr<RealtimeAnalyserNode> create(AudioContext* context, double sampleRate)
+    static PassRefPtr<RealtimeAnalyserNode> create(AudioContext* context, float sampleRate)
     {
         return adoptRef(new RealtimeAnalyserNode(context, sampleRate));      
     }
@@ -64,7 +64,7 @@ public:
     void getByteTimeDomainData(Uint8Array* array) { m_analyser.getByteTimeDomainData(array); }
 
 private:
-    RealtimeAnalyserNode(AudioContext*, double sampleRate);
+    RealtimeAnalyserNode(AudioContext*, float sampleRate);
 
     RealtimeAnalyser m_analyser;
 };
index 416b722..5f3cf34 100644 (file)
@@ -61,7 +61,7 @@ void WaveShaperDSPKernel::process(const float* source, float* destination, size_
         const float input = source[i];
 
         // Calculate an index based on input -1 -> +1 with 0 being at the center of the curve data.
-        int index = curveLength * 0.5 * (input + 1);
+        int index = (curveLength * (input + 1)) / 2;
 
         // Clip index to the input range of the curve.
         // This takes care of input outside of nominal range -1 -> +1
index abd522c..f7571de 100644 (file)
@@ -32,7 +32,7 @@
 
 namespace WebCore {
     
-WaveShaperProcessor::WaveShaperProcessor(double sampleRate, size_t numberOfChannels)
+WaveShaperProcessor::WaveShaperProcessor(float sampleRate, size_t numberOfChannels)
     : AudioDSPKernelProcessor(sampleRate, numberOfChannels)
 {
 }
index 86a188d..eb67260 100644 (file)
@@ -38,7 +38,7 @@ namespace WebCore {
 
 class WaveShaperProcessor : public AudioDSPKernelProcessor {
 public:
-    WaveShaperProcessor(double sampleRate, size_t numberOfChannels);
+    WaveShaperProcessor(float sampleRate, size_t numberOfChannels);
 
     virtual ~WaveShaperProcessor();
 
index 0a66a8e..e4e83b2 100644 (file)
@@ -1,3 +1,20 @@
+2011-10-05  Jer Noble  <jer.noble@apple.com>
+
+        WEB_AUDIO does not compile on Leopard 32-bit.
+        https://bugs.webkit.org/show_bug.cgi?id=69292
+
+        Reviewed by Simon Fraser.
+
+        Platform-independent portions of WEB_AUDIO have changed from double -> float, and 
+        platform-specific subclasses must change as well.
+
+        * src/AudioDestinationChromium.cpp:
+        (WebCore::AudioDestination::create):
+        (WebCore::AudioDestinationChromium::AudioDestinationChromium):
+        (WebCore::AudioDestination::hardwareSampleRate):
+        * src/AudioDestinationChromium.h:
+        (WebCore::AudioDestinationChromium::sampleRate):
+
 2011-10-05  James Robinson  <jamesr@chromium.org>
 
         [chromium] Hook WebCompositor interface for input events up to the compositor proper
index 2be1ff5..9499a1c 100644 (file)
@@ -50,12 +50,12 @@ const size_t maximumCallbackBufferSize = 16384;
 const unsigned numberOfChannels = 2;
 
 // Factory method: Chromium-implementation
-PassOwnPtr<AudioDestination> AudioDestination::create(AudioSourceProvider& provider, double sampleRate)
+PassOwnPtr<AudioDestination> AudioDestination::create(AudioSourceProvider& provider, float sampleRate)
 {
     return adoptPtr(new AudioDestinationChromium(provider, sampleRate));
 }
 
-AudioDestinationChromium::AudioDestinationChromium(AudioSourceProvider& provider, double sampleRate)
+AudioDestinationChromium::AudioDestinationChromium(AudioSourceProvider& provider, float sampleRate)
     : m_provider(provider)
     , m_renderBus(numberOfChannels, renderBufferSize, false)
     , m_sampleRate(sampleRate)
@@ -101,9 +101,9 @@ void AudioDestinationChromium::stop()
     }
 }
 
-double AudioDestination::hardwareSampleRate()
+float AudioDestination::hardwareSampleRate()
 {
-    return webKitPlatformSupport()->audioHardwareSampleRate();
+    return static_cast<float>(webKitPlatformSupport()->audioHardwareSampleRate());
 }
 
 // Pulls on our provider to get the rendered audio stream.
index ecfc4b0..13a3f52 100644 (file)
@@ -42,14 +42,14 @@ namespace WebCore {
 
 class AudioDestinationChromium : public AudioDestination, public WebKit::WebAudioDevice::RenderCallback {
 public:
-    AudioDestinationChromium(AudioSourceProvider&, double sampleRate);
+    AudioDestinationChromium(AudioSourceProvider&, float sampleRate);
     virtual ~AudioDestinationChromium();
 
     virtual void start();
     virtual void stop();
     bool isPlaying() { return m_isPlaying; }
 
-    double sampleRate() const { return m_sampleRate; }
+    float sampleRate() const { return m_sampleRate; }
 
     // WebKit::WebAudioDevice::RenderCallback
     virtual void render(const WebKit::WebVector<float*>& audioData, size_t numberOfFrames);
@@ -57,7 +57,7 @@ public:
 private:
     AudioSourceProvider& m_provider;
     AudioBus m_renderBus;
-    double m_sampleRate;
+    float m_sampleRate;
     bool m_isPlaying;
     OwnPtr<WebKit::WebAudioDevice> m_audioDevice;
     size_t m_callbackBufferSize;