2011-10-05  Jer Noble  <jer.noble@apple.com>
WEB_AUDIO does not compile on Leopard 32-bit.
https://bugs.webkit.org/show_bug.cgi?id=69292
Reviewed by Simon Fraser.
Source/WebCore:
No new tests; covered by all existing audio tests.
Use of float and double within the WEB_AUDIO implementation has been harmonized: most
calculations are now done using floats, narrowPrecisionToFloat() is added where necessary to
narrow double results down to float, and float constants are initialized with float literals
(an illustrative sketch of this pattern follows the file list below):
* platform/audio/AudioBus.cpp:
(WebCore::AudioBus::AudioBus):
(WebCore::AudioBus::createByMixingToMono):
* platform/audio/AudioBus.h:
(WebCore::AudioBus::sampleRate):
(WebCore::AudioBus::setSampleRate):
* platform/audio/AudioDSPKernel.h:
(WebCore::AudioDSPKernel::AudioDSPKernel):
(WebCore::AudioDSPKernel::sampleRate):
* platform/audio/AudioDSPKernelProcessor.cpp:
(WebCore::AudioDSPKernelProcessor::AudioDSPKernelProcessor):
* platform/audio/AudioDSPKernelProcessor.h:
* platform/audio/AudioDestination.h:
* platform/audio/AudioFileReader.h:
* platform/audio/AudioProcessor.h:
(WebCore::AudioProcessor::AudioProcessor):
(WebCore::AudioProcessor::sampleRate):
* platform/audio/AudioUtilities.cpp:
(WebCore::AudioUtilities::decibelsToLinear):
(WebCore::AudioUtilities::linearToDecibels):
(WebCore::AudioUtilities::discreteTimeConstantForSampleRate):
* platform/audio/AudioUtilities.h:
* platform/audio/DynamicsCompressor.cpp:
(WebCore::DynamicsCompressor::DynamicsCompressor):
(WebCore::DynamicsCompressor::initializeParameters):
(WebCore::DynamicsCompressor::parameterValue):
(WebCore::DynamicsCompressor::setEmphasisStageParameters):
(WebCore::DynamicsCompressor::process):
* platform/audio/DynamicsCompressor.h:
(WebCore::DynamicsCompressor::sampleRate):
(WebCore::DynamicsCompressor::nyquist):
* platform/audio/DynamicsCompressorKernel.cpp:
(WebCore::saturate):
(WebCore::DynamicsCompressorKernel::DynamicsCompressorKernel):
(WebCore::DynamicsCompressorKernel::process):
* platform/audio/DynamicsCompressorKernel.h:
* platform/audio/EqualPowerPanner.cpp:
(WebCore::EqualPowerPanner::EqualPowerPanner):
* platform/audio/EqualPowerPanner.h:
* platform/audio/HRTFDatabase.cpp:
(WebCore::HRTFDatabase::create):
(WebCore::HRTFDatabase::HRTFDatabase):
* platform/audio/HRTFDatabase.h:
(WebCore::HRTFDatabase::sampleRate):
* platform/audio/HRTFDatabaseLoader.cpp:
(WebCore::HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary):
(WebCore::HRTFDatabaseLoader::HRTFDatabaseLoader):
* platform/audio/HRTFDatabaseLoader.h:
(WebCore::HRTFDatabaseLoader::databaseSampleRate):
* platform/audio/HRTFElevation.cpp:
(WebCore::HRTFElevation::calculateSymmetricKernelsForAzimuthElevation):
(WebCore::HRTFElevation::calculateKernelsForAzimuthElevation):
(WebCore::HRTFElevation::createForSubject):
(WebCore::HRTFElevation::createByInterpolatingSlices):
* platform/audio/HRTFElevation.h:
(WebCore::HRTFElevation::sampleRate):
(WebCore::HRTFElevation::HRTFElevation):
* platform/audio/HRTFKernel.cpp:
(WebCore::extractAverageGroupDelay):
(WebCore::HRTFKernel::HRTFKernel):
(WebCore::HRTFKernel::createInterpolatedKernel):
* platform/audio/HRTFKernel.h:
(WebCore::HRTFKernel::create):
(WebCore::HRTFKernel::frameDelay):
(WebCore::HRTFKernel::sampleRate):
(WebCore::HRTFKernel::HRTFKernel):
* platform/audio/HRTFPanner.cpp:
(WebCore::HRTFPanner::HRTFPanner):
(WebCore::HRTFPanner::fftSizeForSampleRate):
* platform/audio/HRTFPanner.h:
(WebCore::HRTFPanner::sampleRate):
* platform/audio/Panner.cpp:
(WebCore::Panner::create):
* platform/audio/Panner.h:
* platform/audio/chromium/AudioBusChromium.cpp:
(WebCore::AudioBus::loadPlatformResource):
* platform/audio/mac/AudioBusMac.mm:
(WebCore::AudioBus::loadPlatformResource):
* platform/audio/mac/AudioDestinationMac.cpp:
(WebCore::AudioDestination::create):
(WebCore::AudioDestination::hardwareSampleRate):
(WebCore::AudioDestinationMac::AudioDestinationMac):
* platform/audio/mac/AudioDestinationMac.h:
(WebCore::AudioDestinationMac::sampleRate):
* platform/audio/mac/AudioFileReaderMac.cpp:
(WebCore::AudioFileReader::createBus):
(WebCore::createBusFromAudioFile):
(WebCore::createBusFromInMemoryAudioFile):
* platform/audio/mac/AudioFileReaderMac.h:
* webaudio/AsyncAudioDecoder.cpp:
(WebCore::AsyncAudioDecoder::decodeAsync):
(WebCore::AsyncAudioDecoder::DecodingTask::create):
(WebCore::AsyncAudioDecoder::DecodingTask::DecodingTask):
* webaudio/AsyncAudioDecoder.h:
(WebCore::AsyncAudioDecoder::DecodingTask::sampleRate):
* webaudio/AudioBasicProcessorNode.cpp:
(WebCore::AudioBasicProcessorNode::AudioBasicProcessorNode):
* webaudio/AudioBasicProcessorNode.h:
* webaudio/AudioBuffer.cpp:
(WebCore::AudioBuffer::create):
(WebCore::AudioBuffer::createFromAudioFileData):
(WebCore::AudioBuffer::AudioBuffer):
* webaudio/AudioBuffer.h:
(WebCore::AudioBuffer::sampleRate):
* webaudio/AudioBufferSourceNode.cpp:
(WebCore::AudioBufferSourceNode::create):
(WebCore::AudioBufferSourceNode::AudioBufferSourceNode):
(WebCore::AudioBufferSourceNode::process):
(WebCore::AudioBufferSourceNode::renderFromBuffer):
* webaudio/AudioBufferSourceNode.h:
* webaudio/AudioChannelMerger.cpp:
(WebCore::AudioChannelMerger::AudioChannelMerger):
* webaudio/AudioChannelMerger.h:
(WebCore::AudioChannelMerger::create):
* webaudio/AudioChannelSplitter.cpp:
(WebCore::AudioChannelSplitter::AudioChannelSplitter):
* webaudio/AudioChannelSplitter.h:
(WebCore::AudioChannelSplitter::create):
* webaudio/AudioContext.cpp:
(WebCore::AudioContext::createOfflineContext):
(WebCore::AudioContext::AudioContext):
(WebCore::AudioContext::createBuffer):
* webaudio/AudioContext.h:
(WebCore::AudioContext::sampleRate):
* webaudio/AudioDestinationNode.cpp:
(WebCore::AudioDestinationNode::AudioDestinationNode):
* webaudio/AudioDestinationNode.h:
* webaudio/AudioGainNode.cpp:
(WebCore::AudioGainNode::AudioGainNode):
* webaudio/AudioGainNode.h:
(WebCore::AudioGainNode::create):
* webaudio/AudioListener.cpp:
(WebCore::AudioListener::AudioListener):
* webaudio/AudioListener.h:
(WebCore::AudioListener::setPosition):
(WebCore::AudioListener::setOrientation):
(WebCore::AudioListener::setVelocity):
* webaudio/AudioNode.cpp:
(WebCore::AudioNode::AudioNode):
* webaudio/AudioNode.h:
(WebCore::AudioNode::sampleRate):
* webaudio/AudioPannerNode.cpp:
(WebCore::AudioPannerNode::AudioPannerNode):
(WebCore::AudioPannerNode::getAzimuthElevation):
* webaudio/AudioPannerNode.h:
(WebCore::AudioPannerNode::create):
* webaudio/AudioParam.cpp:
(WebCore::AudioParam::value):
(WebCore::AudioParam::smoothedValue):
(WebCore::AudioParam::smooth):
(WebCore::AudioParam::calculateSampleAccurateValues):
* webaudio/AudioParamTimeline.cpp:
(WebCore::AudioParamTimeline::valueForContextTime):
(WebCore::timeToSampleFrame):
(WebCore::AudioParamTimeline::valuesForTimeRangeImpl):
* webaudio/AudioSourceNode.h:
(WebCore::AudioSourceNode::AudioSourceNode):
* webaudio/BiquadFilterNode.cpp:
(WebCore::BiquadFilterNode::BiquadFilterNode):
* webaudio/BiquadFilterNode.h:
(WebCore::BiquadFilterNode::create):
* webaudio/BiquadProcessor.cpp:
(WebCore::BiquadProcessor::BiquadProcessor):
* webaudio/BiquadProcessor.h:
* webaudio/ConvolverNode.cpp:
(WebCore::ConvolverNode::ConvolverNode):
* webaudio/ConvolverNode.h:
(WebCore::ConvolverNode::create):
* webaudio/DefaultAudioDestinationNode.cpp:
(WebCore::DefaultAudioDestinationNode::initialize):
* webaudio/DefaultAudioDestinationNode.h:
(WebCore::DefaultAudioDestinationNode::sampleRate):
* webaudio/DelayDSPKernel.cpp:
(WebCore::DelayDSPKernel::DelayDSPKernel):
(WebCore::DelayDSPKernel::process):
* webaudio/DelayDSPKernel.h:
* webaudio/DelayNode.cpp:
(WebCore::DelayNode::DelayNode):
* webaudio/DelayNode.h:
(WebCore::DelayNode::create):
* webaudio/DelayProcessor.cpp:
(WebCore::DelayProcessor::DelayProcessor):
* webaudio/DelayProcessor.h:
* webaudio/DynamicsCompressorNode.cpp:
(WebCore::DynamicsCompressorNode::DynamicsCompressorNode):
* webaudio/DynamicsCompressorNode.h:
(WebCore::DynamicsCompressorNode::create):
* webaudio/HighPass2FilterNode.cpp:
(WebCore::HighPass2FilterNode::HighPass2FilterNode):
* webaudio/HighPass2FilterNode.h:
(WebCore::HighPass2FilterNode::create):
* webaudio/JavaScriptAudioNode.cpp:
(WebCore::JavaScriptAudioNode::create):
(WebCore::JavaScriptAudioNode::JavaScriptAudioNode):
(WebCore::JavaScriptAudioNode::initialize):
* webaudio/JavaScriptAudioNode.h:
* webaudio/LowPass2FilterNode.cpp:
(WebCore::LowPass2FilterNode::LowPass2FilterNode):
* webaudio/LowPass2FilterNode.h:
(WebCore::LowPass2FilterNode::create):
* webaudio/OfflineAudioDestinationNode.h:
(WebCore::OfflineAudioDestinationNode::sampleRate):
* webaudio/RealtimeAnalyserNode.cpp:
(WebCore::RealtimeAnalyserNode::RealtimeAnalyserNode):
* webaudio/RealtimeAnalyserNode.h:
(WebCore::RealtimeAnalyserNode::create):
* webaudio/WaveShaperDSPKernel.cpp:
(WebCore::WaveShaperDSPKernel::process):
* webaudio/WaveShaperProcessor.cpp:
(WebCore::WaveShaperProcessor::WaveShaperProcessor):
* webaudio/WaveShaperProcessor.h:
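For illustration only, here is a minimal, self-contained sketch of the pattern these changes apply.
The class and member names are hypothetical, and narrowPrecisionToFloat() is re-declared locally to
approximate WebCore's helper from FloatConversion.h; it is not part of the patch itself.
    // Illustration only (not part of the patch): the double -> float harmonization pattern.
    #include <cassert>
    #include <limits>

    static float narrowPrecisionToFloat(double value)
    {
        // WebCore's helper asserts the value fits in a float before narrowing.
        assert(value >= -std::numeric_limits<float>::max()
            && value <= std::numeric_limits<float>::max());
        return static_cast<float>(value);
    }

    class ExampleProcessor { // hypothetical class, stands in for e.g. AudioProcessor
    public:
        explicit ExampleProcessor(float sampleRate)
            : m_sampleRate(sampleRate)   // sample rates are now carried as float
            , m_attackTime(0.003f)       // float constants use float literals
        {
        }

        float sampleRate() const { return m_sampleRate; }
        float nyquist() const { return m_sampleRate / 2; }

        void setSampleRateFromHardware(double hardwareRate)
        {
            // Naturally-double results (e.g. CoreAudio's Float64 nominal sample rate)
            // are narrowed explicitly instead of implicitly truncated.
            m_sampleRate = narrowPrecisionToFloat(hardwareRate);
        }

    private:
        float m_sampleRate;
        float m_attackTime;
    };

    int main()
    {
        ExampleProcessor processor(44100.0f);
        processor.setSampleRateFromHardware(48000.0);
        return processor.sampleRate() == 48000.0f ? 0 : 1;
    }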
Source/WebKit/chromium:
The platform-independent portions of WEB_AUDIO have changed from double to float, so the
platform-specific subclasses must change as well (see the override sketch after the file list below).
* src/AudioDestinationChromium.cpp:
(WebCore::AudioDestination::create):
(WebCore::AudioDestinationChromium::AudioDestinationChromium):
(WebCore::AudioDestination::hardwareSampleRate):
* src/AudioDestinationChromium.h:
(WebCore::AudioDestinationChromium::sampleRate):
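For illustration, a minimal sketch of why the platform subclasses must follow the base-class
signature change. The derived class name is hypothetical (the real ones are AudioDestinationMac and
AudioDestinationChromium); only the base-class shape matches the patch.
    // Illustration only: once AudioDestination::sampleRate() returns float, each
    // platform override must return float too; keeping the old double-returning
    // signature would no longer compile as an override of the pure virtual.
    class AudioDestination {
    public:
        virtual ~AudioDestination() { }
        virtual float sampleRate() const = 0; // was: virtual double sampleRate() const = 0;
    };

    class ExamplePlatformDestination : public AudioDestination { // hypothetical subclass
    public:
        explicit ExamplePlatformDestination(float sampleRate) : m_sampleRate(sampleRate) { }
        virtual float sampleRate() const { return m_sampleRate; } // updated to float

    private:
        float m_sampleRate;
    };

    int main()
    {
        ExamplePlatformDestination destination(44100.0f);
        return destination.sampleRate() > 0 ? 0 : 1;
    }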
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@96745 268f45cc-cd09-0410-ab3c-d52691b4dbfc
AudioBus::AudioBus(unsigned numberOfChannels, size_t length, bool allocate)
: m_length(length)
- , m_busGain(1.0)
+ , m_busGain(1)
, m_isFirstTime(true)
- , m_sampleRate(0.0)
+ , m_sampleRate(0)
{
m_channels.reserveInitialCapacity(numberOfChannels);
// Do the mono mixdown.
for (unsigned i = 0; i < n; ++i)
- destination[i] = 0.5 * (sourceL[i] + sourceR[i]);
+ destination[i] = (sourceL[i] + sourceR[i]) / 2;
destinationBus->setSampleRate(sourceBus->sampleRate());
return destinationBus.release();
size_t length() const { return m_length; }
// Sample-rate : 0.0 if unknown or "don't care"
- double sampleRate() const { return m_sampleRate; }
- void setSampleRate(double sampleRate) { m_sampleRate = sampleRate; }
+ float sampleRate() const { return m_sampleRate; }
+ void setSampleRate(float sampleRate) { m_sampleRate = sampleRate; }
// Zeroes all channels.
void zero();
// Makes maximum absolute value == 1.0 (if possible).
void normalize();
- static PassOwnPtr<AudioBus> loadPlatformResource(const char* name, double sampleRate);
+ static PassOwnPtr<AudioBus> loadPlatformResource(const char* name, float sampleRate);
protected:
AudioBus() { };
double m_busGain;
bool m_isFirstTime;
- double m_sampleRate; // 0.0 if unknown or N/A
+ float m_sampleRate; // 0.0 if unknown or N/A
};
} // WebCore
{
}
- AudioDSPKernel(double sampleRate)
+ AudioDSPKernel(float sampleRate)
: m_kernelProcessor(0)
, m_sampleRate(sampleRate)
{
virtual void process(const float* source, float* destination, size_t framesToProcess) = 0;
virtual void reset() = 0;
- double sampleRate() const { return m_sampleRate; }
+ float sampleRate() const { return m_sampleRate; }
double nyquist() const { return 0.5 * sampleRate(); }
AudioDSPKernelProcessor* processor() { return m_kernelProcessor; }
protected:
AudioDSPKernelProcessor* m_kernelProcessor;
- double m_sampleRate;
+ float m_sampleRate;
};
} // namespace WebCore
namespace WebCore {
// setNumberOfChannels() may later be called if the object is not yet in an "initialized" state.
-AudioDSPKernelProcessor::AudioDSPKernelProcessor(double sampleRate, unsigned numberOfChannels)
+AudioDSPKernelProcessor::AudioDSPKernelProcessor(float sampleRate, unsigned numberOfChannels)
: AudioProcessor(sampleRate)
, m_numberOfChannels(numberOfChannels)
, m_hasJustReset(true)
class AudioDSPKernelProcessor : public AudioProcessor {
public:
// numberOfChannels may be later changed if object is not yet in an "initialized" state
- AudioDSPKernelProcessor(double sampleRate, unsigned numberOfChannels);
+ AudioDSPKernelProcessor(float sampleRate, unsigned numberOfChannels);
// Subclasses create the appropriate type of processing kernel here.
// We'll call this to create a kernel for each channel.
class AudioDestination {
public:
- static PassOwnPtr<AudioDestination> create(AudioSourceProvider&, double sampleRate);
+ static PassOwnPtr<AudioDestination> create(AudioSourceProvider&, float sampleRate);
virtual ~AudioDestination() { }
virtual bool isPlaying() = 0;
// Sample-rate conversion may happen in AudioDestination to the hardware sample-rate
- virtual double sampleRate() const = 0;
- static double hardwareSampleRate();
+ virtual float sampleRate() const = 0;
+ static float hardwareSampleRate();
};
} // namespace WebCore
// sampleRate will be made (if it doesn't already match the file's sample-rate).
// The created buffer will have its sample-rate set correctly to the result.
-PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, double sampleRate);
+PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate);
-PassOwnPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, double sampleRate);
+PassOwnPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, float sampleRate);
// May pass in 0.0 for sampleRate in which case it will use the AudioBus's sampleRate
void writeBusToAudioFile(AudioBus* bus, const char* filePath, double fileSampleRate);
class AudioProcessor {
public:
- AudioProcessor(double sampleRate)
+ AudioProcessor(float sampleRate)
: m_initialized(false)
, m_sampleRate(sampleRate)
{
bool isInitialized() const { return m_initialized; }
- double sampleRate() const { return m_sampleRate; }
+ float sampleRate() const { return m_sampleRate; }
protected:
bool m_initialized;
- double m_sampleRate;
+ float m_sampleRate;
};
} // namespace WebCore
namespace AudioUtilities {
-double decibelsToLinear(double decibels)
+float decibelsToLinear(float decibels)
{
- return pow(10.0, 0.05 * decibels);
+ return powf(10, 0.05f * decibels);
}
-double linearToDecibels(double linear)
+float linearToDecibels(float linear)
{
// It's not possible to calculate decibels for a zero linear value since it would be -Inf.
// -1000.0 dB represents a very tiny linear value in case we ever reach this case.
ASSERT(linear);
if (!linear)
- return -1000.0;
+ return -1000;
- return 20.0 * log10(linear);
+ return 20 * log10f(linear);
}
-double discreteTimeConstantForSampleRate(double timeConstant, double sampleRate)
+float discreteTimeConstantForSampleRate(float timeConstant, float sampleRate)
{
// hardcoded value is temporary build fix for Windows.
// FIXME: replace hardcode 2.718282 with M_E until the correct MathExtras.h solution is determined.
- return 1.0 - pow(1.0 / 2.718282, 1.0 / (sampleRate * timeConstant));
+ return 1 - powf(1 / 2.718282f, 1 / (sampleRate * timeConstant));
}
} // AudioUtilites
namespace AudioUtilities {
// Standard functions for converting to and from decibel values from linear.
-double linearToDecibels(double);
-double decibelsToLinear(double);
+float linearToDecibels(float);
+float decibelsToLinear(float);
// timeConstant is the time it takes a first-order linear time-invariant system
// to reach the value 1 - 1/e (around 63.2%) given a step input response.
// discreteTimeConstantForSampleRate() will return the discrete time-constant for the specific sampleRate.
-double discreteTimeConstantForSampleRate(double timeConstant, double sampleRate);
+float discreteTimeConstantForSampleRate(float timeConstant, float sampleRate);
} // AudioUtilites
using namespace AudioUtilities;
-DynamicsCompressor::DynamicsCompressor(bool isStereo, double sampleRate)
+DynamicsCompressor::DynamicsCompressor(bool isStereo, float sampleRate)
: m_isStereo(isStereo)
, m_sampleRate(sampleRate)
, m_compressor(sampleRate)
m_parameters[ParamThreshold] = -24; // dB
m_parameters[ParamHeadroom] = 21; // dB
- m_parameters[ParamAttack] = 0.003; // seconds
- m_parameters[ParamRelease] = 0.250; // seconds
- m_parameters[ParamPreDelay] = 0.006; // seconds
+ m_parameters[ParamAttack] = 0.003f; // seconds
+ m_parameters[ParamRelease] = 0.250f; // seconds
+ m_parameters[ParamPreDelay] = 0.006f; // seconds
// Release zone values 0 -> 1.
- m_parameters[ParamReleaseZone1] = 0.09;
- m_parameters[ParamReleaseZone2] = 0.16;
- m_parameters[ParamReleaseZone3] = 0.42;
- m_parameters[ParamReleaseZone4] = 0.98;
+ m_parameters[ParamReleaseZone1] = 0.09f;
+ m_parameters[ParamReleaseZone2] = 0.16f;
+ m_parameters[ParamReleaseZone3] = 0.42f;
+ m_parameters[ParamReleaseZone4] = 0.98f;
- m_parameters[ParamFilterStageGain] = 4.4; // dB
+ m_parameters[ParamFilterStageGain] = 4.4f; // dB
m_parameters[ParamFilterStageRatio] = 2;
m_parameters[ParamFilterAnchor] = 15000 / nyquist();
m_parameters[ParamEffectBlend] = 1;
}
-double DynamicsCompressor::parameterValue(unsigned parameterID)
+float DynamicsCompressor::parameterValue(unsigned parameterID)
{
ASSERT(parameterID < ParamLast);
return m_parameters[parameterID];
float gk = 1 - gain / 20;
float f1 = normalizedFrequency * gk;
float f2 = normalizedFrequency / gk;
- float r1 = exp(-f1 * piDouble);
- float r2 = exp(-f2 * piDouble);
+ float r1 = expf(-f1 * piFloat);
+ float r2 = expf(-f2 * piFloat);
// Set pre-filter zero and pole to create an emphasis filter.
m_preFilter[stageIndex].setZero(r1);
// 1 mixes in only the compressed signal.
float effectBlend = parameterValue(ParamEffectBlend);
- double releaseZone1 = parameterValue(ParamReleaseZone1);
- double releaseZone2 = parameterValue(ParamReleaseZone2);
- double releaseZone3 = parameterValue(ParamReleaseZone3);
- double releaseZone4 = parameterValue(ParamReleaseZone4);
+ float releaseZone1 = parameterValue(ParamReleaseZone1);
+ float releaseZone2 = parameterValue(ParamReleaseZone2);
+ float releaseZone3 = parameterValue(ParamReleaseZone3);
+ float releaseZone4 = parameterValue(ParamReleaseZone4);
// Apply compression to the pre-filtered signal.
// The processing is performed in place.
ParamLast
};
- DynamicsCompressor(bool isStereo, double sampleRate);
+ DynamicsCompressor(bool isStereo, float sampleRate);
void process(AudioBus* sourceBus, AudioBus* destinationBus, unsigned framesToProcess);
void reset();
- double parameterValue(unsigned parameterID);
+ float parameterValue(unsigned parameterID);
bool isStereo() const { return m_isStereo; }
- double sampleRate() const { return m_sampleRate; }
- double nyquist() const { return 0.5 * m_sampleRate; }
+ float sampleRate() const { return m_sampleRate; }
+ float nyquist() const { return m_sampleRate / 2; }
protected:
// m_parameters holds the tweakable compressor parameters.
// FIXME: expose some of the most important ones (such as threshold, attack, release)
// as DynamicsCompressorNode attributes.
- double m_parameters[ParamLast];
+ float m_parameters[ParamLast];
void initializeParameters();
bool m_isStereo;
- double m_sampleRate;
+ float m_sampleRate;
// Emphasis filter controls.
float m_lastFilterStageRatio;
using namespace AudioUtilities;
// Metering hits peaks instantly, but releases this fast (in seconds).
-const double meteringReleaseTimeConstant = 0.325;
+const float meteringReleaseTimeConstant = 0.325f;
// Exponential saturation curve.
-static double saturate(double x, double k)
+static float saturate(float x, float k)
{
return 1 - exp(-k * x);
}
-DynamicsCompressorKernel::DynamicsCompressorKernel(double sampleRate)
+DynamicsCompressorKernel::DynamicsCompressorKernel(float sampleRate)
: m_sampleRate(sampleRate)
, m_lastPreDelayFrames(DefaultPreDelayFrames)
, m_preDelayBufferL(MaxPreDelayFrames)
float wetMix = effectBlend;
// Threshold and headroom.
- double linearThreshold = decibelsToLinear(dbThreshold);
- double linearHeadroom = decibelsToLinear(dbHeadroom);
+ float linearThreshold = decibelsToLinear(dbThreshold);
+ float linearHeadroom = decibelsToLinear(dbHeadroom);
// Makeup gain.
- double maximum = 1.05 * linearHeadroom * linearThreshold;
- double kk = (maximum - linearThreshold);
- double inverseKK = 1 / kk;
+ float maximum = 1.05f * linearHeadroom * linearThreshold;
+ float kk = (maximum - linearThreshold);
+ float inverseKK = 1 / kk;
- double fullRangeGain = (linearThreshold + kk * saturate(1 - linearThreshold, 1));
- double fullRangeMakeupGain = 1 / fullRangeGain;
+ float fullRangeGain = (linearThreshold + kk * saturate(1 - linearThreshold, 1));
+ float fullRangeMakeupGain = 1 / fullRangeGain;
// Empirical/perceptual tuning.
- fullRangeMakeupGain = pow(fullRangeMakeupGain, 0.6);
+ fullRangeMakeupGain = powf(fullRangeMakeupGain, 0.6f);
float masterLinearGain = decibelsToLinear(dbPostGain) * fullRangeMakeupGain;
float releaseFrames = sampleRate * releaseTime;
// Detector release time.
- double satReleaseTime = 0.0025;
- double satReleaseFrames = satReleaseTime * sampleRate;
+ float satReleaseTime = 0.0025f;
+ float satReleaseFrames = satReleaseTime * sampleRate;
// Create a smooth function which passes through four points.
// Polynomial of the form
// y = a + b*x + c*x^2 + d*x^3 + e*x^4;
- double y1 = releaseFrames * releaseZone1;
- double y2 = releaseFrames * releaseZone2;
- double y3 = releaseFrames * releaseZone3;
- double y4 = releaseFrames * releaseZone4;
+ float y1 = releaseFrames * releaseZone1;
+ float y2 = releaseFrames * releaseZone2;
+ float y3 = releaseFrames * releaseZone3;
+ float y4 = releaseFrames * releaseZone4;
// All of these coefficients were derived for 4th order polynomial curve fitting where the y values
// match the evenly spaced x values as follows: (y1 : x == 0, y2 : x == 1, y3 : x == 2, y4 : x == 3)
- double kA = 0.9999999999999998*y1 + 1.8432219684323923e-16*y2 - 1.9373394351676423e-16*y3 + 8.824516011816245e-18*y4;
- double kB = -1.5788320352845888*y1 + 2.3305837032074286*y2 - 0.9141194204840429*y3 + 0.1623677525612032*y4;
- double kC = 0.5334142869106424*y1 - 1.272736789213631*y2 + 0.9258856042207512*y3 - 0.18656310191776226*y4;
- double kD = 0.08783463138207234*y1 - 0.1694162967925622*y2 + 0.08588057951595272*y3 - 0.00429891410546283*y4;
- double kE = -0.042416883008123074*y1 + 0.1115693827987602*y2 - 0.09764676325265872*y3 + 0.028494263462021576*y4;
+ float kA = 0.9999999999999998f*y1 + 1.8432219684323923e-16f*y2 - 1.9373394351676423e-16f*y3 + 8.824516011816245e-18f*y4;
+ float kB = -1.5788320352845888f*y1 + 2.3305837032074286f*y2 - 0.9141194204840429f*y3 + 0.1623677525612032f*y4;
+ float kC = 0.5334142869106424f*y1 - 1.272736789213631f*y2 + 0.9258856042207512f*y3 - 0.18656310191776226f*y4;
+ float kD = 0.08783463138207234f*y1 - 0.1694162967925622f*y2 + 0.08588057951595272f*y3 - 0.00429891410546283f*y4;
+ float kE = -0.042416883008123074f*y1 + 0.1115693827987602f*y2 - 0.09764676325265872f*y3 + 0.028494263462021576f*y4;
// x ranges from 0 -> 3 0 1 2 3
// -15 -10 -5 0db
float desiredGain = m_detectorAverage;
// Pre-warp so we get desiredGain after sin() warp below.
- double scaledDesiredGain = asin(desiredGain) / (0.5 * piDouble);
+ float scaledDesiredGain = asinf(desiredGain) / (0.5f * piFloat);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Deal with envelopes
bool isReleasing = scaledDesiredGain > m_compressorGain;
// compressionDiffDb is the difference between current compression level and the desired level.
- double compressionDiffDb = linearToDecibels(m_compressorGain / scaledDesiredGain);
+ float compressionDiffDb = linearToDecibels(m_compressorGain / scaledDesiredGain);
if (isReleasing) {
// Release mode - compressionDiffDb should be negative dB
// Adaptive release - higher compression (lower compressionDiffDb) releases faster.
// Contain within range: -12 -> 0 then scale to go from 0 -> 3
- double x = compressionDiffDb;
- x = max(-12., x);
- x = min(0., x);
- x = 0.25 * (x + 12);
+ float x = compressionDiffDb;
+ x = max(-12.0f, x);
+ x = min(0.0f, x);
+ x = 0.25f * (x + 12);
// Compute adaptive release curve using 4th order polynomial.
// Normal values for the polynomial coefficients would create a monotonically increasing function.
- double x2 = x * x;
- double x3 = x2 * x;
- double x4 = x2 * x2;
- double releaseFrames = kA + kB * x + kC * x2 + kD * x3 + kE * x4;
+ float x2 = x * x;
+ float x3 = x2 * x;
+ float x4 = x2 * x2;
+ float releaseFrames = kA + kB * x + kC * x2 + kD * x3 + kE * x4;
#define kSpacingDb 5
- double dbPerFrame = kSpacingDb / releaseFrames;
+ float dbPerFrame = kSpacingDb / releaseFrames;
envelopeRate = decibelsToLinear(dbPerFrame);
} else {
if (m_maxAttackCompressionDiffDb == -1 || m_maxAttackCompressionDiffDb < compressionDiffDb)
m_maxAttackCompressionDiffDb = compressionDiffDb;
- double effAttenDiffDb = max(0.5f, m_maxAttackCompressionDiffDb);
+ float effAttenDiffDb = max(0.5f, m_maxAttackCompressionDiffDb);
- double x = 0.25 / effAttenDiffDb;
- envelopeRate = 1 - pow(x, double(1 / attackFrames));
+ float x = 0.25f / effAttenDiffDb;
+ envelopeRate = 1 - powf(x, 1 / attackFrames);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
float undelayedL = *sourceL++;
float undelayedR = *sourceR++;
- compressorInput = 0.5 * (undelayedL + undelayedR);
+ compressorInput = 0.5f * (undelayedL + undelayedR);
inputL = delayBufferL[preDelayReadIndex];
inputR = delayBufferR[preDelayReadIndex];
// Calculate shaped power on undelayed input.
float scaledInput = compressorInput;
- double absInput = scaledInput > 0 ? scaledInput : -scaledInput;
+ float absInput = scaledInput > 0 ? scaledInput : -scaledInput;
// Put through shaping curve.
// This is linear up to the threshold, then exponentially approaches the maximum (headroom amount above threshold).
// The transition from the threshold to the exponential portion is smooth (1st derivative matched).
- double shapedInput = absInput < linearThreshold ? absInput : linearThreshold + kk * saturate(absInput - linearThreshold, inverseKK);
+ float shapedInput = absInput < linearThreshold ? absInput : linearThreshold + kk * saturate(absInput - linearThreshold, inverseKK);
- double attenuation = absInput <= 0.0001 ? 1 : shapedInput / absInput;
+ float attenuation = absInput <= 0.0001f ? 1 : shapedInput / absInput;
- double attenuationDb = -linearToDecibels(attenuation);
- attenuationDb = max(2., attenuationDb);
+ float attenuationDb = -linearToDecibels(attenuation);
+ attenuationDb = max(2.0f, attenuationDb);
- double dbPerFrame = attenuationDb / satReleaseFrames;
+ float dbPerFrame = attenuationDb / satReleaseFrames;
- double satReleaseRate = decibelsToLinear(dbPerFrame) - 1;
+ float satReleaseRate = decibelsToLinear(dbPerFrame) - 1;
bool isRelease = (attenuation > detectorAverage);
- double rate = isRelease ? satReleaseRate : 1;
+ float rate = isRelease ? satReleaseRate : 1;
detectorAverage += (attenuation - detectorAverage) * rate;
detectorAverage = min(1.0f, detectorAverage);
}
// Warp pre-compression gain to smooth out sharp exponential transition points.
- double postWarpCompressorGain = sin(0.5 * piDouble * compressorGain);
+ float postWarpCompressorGain = sinf(0.5f * piFloat * compressorGain);
// Calculate total gain using master gain and effect blend.
- double totalGain = dryMix + wetMix * masterLinearGain * postWarpCompressorGain;
+ float totalGain = dryMix + wetMix * masterLinearGain * postWarpCompressorGain;
// Calculate metering.
- double dbRealGain = 20 * log10(postWarpCompressorGain);
+ float dbRealGain = 20 * log10(postWarpCompressorGain);
if (dbRealGain < m_meteringGain)
m_meteringGain = dbRealGain;
else
class DynamicsCompressorKernel {
public:
- DynamicsCompressorKernel(double sampleRate);
+ DynamicsCompressorKernel(float sampleRate);
// Performs stereo-linked compression.
void process(float *sourceL,
#include <wtf/MathExtras.h>
// Use a 50ms smoothing / de-zippering time-constant.
-const double SmoothingTimeConstant = 0.050;
+const float SmoothingTimeConstant = 0.050f;
using namespace std;
namespace WebCore {
-EqualPowerPanner::EqualPowerPanner(double sampleRate)
+EqualPowerPanner::EqualPowerPanner(float sampleRate)
: Panner(PanningModelEqualPower)
, m_isFirstRender(true)
, m_gainL(0.0)
class EqualPowerPanner : public Panner {
public:
- EqualPowerPanner(double sampleRate);
+ EqualPowerPanner(float sampleRate);
virtual void pan(double azimuth, double elevation, AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess);
const unsigned HRTFDatabase::InterpolationFactor = 1;
const unsigned HRTFDatabase::NumberOfTotalElevations = NumberOfRawElevations * InterpolationFactor;
-PassOwnPtr<HRTFDatabase> HRTFDatabase::create(double sampleRate)
+PassOwnPtr<HRTFDatabase> HRTFDatabase::create(float sampleRate)
{
OwnPtr<HRTFDatabase> hrtfDatabase = adoptPtr(new HRTFDatabase(sampleRate));
return hrtfDatabase.release();
}
-HRTFDatabase::HRTFDatabase(double sampleRate)
+HRTFDatabase::HRTFDatabase(float sampleRate)
: m_elevations(NumberOfTotalElevations)
, m_sampleRate(sampleRate)
{
// Create the interpolated convolution kernels and delays.
for (unsigned jj = 1; jj < InterpolationFactor; ++jj) {
- double x = static_cast<double>(jj) / static_cast<double>(InterpolationFactor);
+ float x = static_cast<float>(jj) / static_cast<float>(InterpolationFactor);
m_elevations[i + jj] = HRTFElevation::createByInterpolatingSlices(m_elevations[i].get(), m_elevations[j].get(), x, sampleRate);
ASSERT(m_elevations[i + jj].get());
}
class HRTFDatabase {
WTF_MAKE_NONCOPYABLE(HRTFDatabase);
public:
- static PassOwnPtr<HRTFDatabase> create(double sampleRate);
+ static PassOwnPtr<HRTFDatabase> create(float sampleRate);
// getKernelsFromAzimuthElevation() returns a left and right ear kernel, and an interpolated left and right frame delay for the given azimuth and elevation.
// azimuthBlend must be in the range 0 -> 1.
// Returns the number of different azimuth angles.
static unsigned numberOfAzimuths() { return HRTFElevation::NumberOfTotalAzimuths; }
- double sampleRate() const { return m_sampleRate; }
+ float sampleRate() const { return m_sampleRate; }
private:
- explicit HRTFDatabase(double sampleRate);
+ explicit HRTFDatabase(float sampleRate);
// Minimum and maximum elevation angles (inclusive) for a HRTFDatabase.
static const int MinElevation;
static unsigned indexFromElevationAngle(double);
Vector<OwnPtr<HRTFElevation> > m_elevations;
- double m_sampleRate;
+ float m_sampleRate;
};
} // namespace WebCore
// Singleton
HRTFDatabaseLoader* HRTFDatabaseLoader::s_loader = 0;
-PassRefPtr<HRTFDatabaseLoader> HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(double sampleRate)
+PassRefPtr<HRTFDatabaseLoader> HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(float sampleRate)
{
ASSERT(isMainThread());
return loader;
}
-HRTFDatabaseLoader::HRTFDatabaseLoader(double sampleRate)
+HRTFDatabaseLoader::HRTFDatabaseLoader(float sampleRate)
: m_databaseLoaderThread(0)
, m_databaseSampleRate(sampleRate)
{
// Lazily creates the singleton HRTFDatabaseLoader (if not already created) and starts loading asynchronously (when created the first time).
// Returns the singleton HRTFDatabaseLoader.
// Must be called from the main thread.
- static PassRefPtr<HRTFDatabaseLoader> createAndLoadAsynchronouslyIfNecessary(double sampleRate);
+ static PassRefPtr<HRTFDatabaseLoader> createAndLoadAsynchronouslyIfNecessary(float sampleRate);
// Returns the singleton HRTFDatabaseLoader.
static HRTFDatabaseLoader* loader() { return s_loader; }
HRTFDatabase* database() { return m_hrtfDatabase.get(); }
- double databaseSampleRate() const { return m_databaseSampleRate; }
+ float databaseSampleRate() const { return m_databaseSampleRate; }
// Called in asynchronous loading thread.
void load();
private:
// Both constructor and destructor must be called from the main thread.
- explicit HRTFDatabaseLoader(double sampleRate);
+ explicit HRTFDatabaseLoader(float sampleRate);
// If it hasn't already been loaded, creates a new thread and initiates asynchronous loading of the default database.
// This must be called from the main thread.
Mutex m_threadLock;
ThreadIdentifier m_databaseLoaderThread;
- double m_databaseSampleRate;
+ float m_databaseSampleRate;
};
} // namespace WebCore
// Takes advantage of the symmetry and creates a composite version of the two measured versions. For example, we have both azimuth 30 and -30 degrees
// where the roles of left and right ears are reversed with respect to each other.
-bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, double sampleRate, const String& subjectName,
+bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
{
RefPtr<HRTFKernel> kernelL1;
return false;
// Notice L/R reversal in symmetric version.
- kernelL = HRTFKernel::createInterpolatedKernel(kernelL1.get(), kernelR2.get(), 0.5);
- kernelR = HRTFKernel::createInterpolatedKernel(kernelR1.get(), kernelL2.get(), 0.5);
+ kernelL = HRTFKernel::createInterpolatedKernel(kernelL1.get(), kernelR2.get(), 0.5f);
+ kernelR = HRTFKernel::createInterpolatedKernel(kernelR1.get(), kernelL2.get(), 0.5f);
return true;
}
-bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevation, double sampleRate, const String& subjectName,
+bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
{
// Valid values for azimuth are 0 -> 345 in 15 degree increments.
45 // 345
};
-PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, double sampleRate)
+PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, float sampleRate)
{
bool isElevationGood = elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
ASSERT(isElevationGood);
// Create the interpolated convolution kernels and delays.
for (unsigned jj = 1; jj < InterpolationFactor; ++jj) {
- double x = double(jj) / double(InterpolationFactor); // interpolate from 0 -> 1
+ float x = float(jj) / float(InterpolationFactor); // interpolate from 0 -> 1
(*kernelListL)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListL->at(i).get(), kernelListL->at(j).get(), x);
(*kernelListR)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListR->at(i).get(), kernelListR->at(j).get(), x);
return hrtfElevation.release();
}
-PassOwnPtr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, double x, double sampleRate)
+PassOwnPtr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate)
{
ASSERT(hrtfElevation1 && hrtfElevation2);
if (!hrtfElevation1 || !hrtfElevation2)
// Normally, there will only be a single HRTF database set, but this API supports the possibility of multiple ones with different names.
// Interpolated azimuths will be generated based on InterpolationFactor.
// Valid values for elevation are -45 -> +90 in 15 degree increments.
- static PassOwnPtr<HRTFElevation> createForSubject(const String& subjectName, int elevation, double sampleRate);
+ static PassOwnPtr<HRTFElevation> createForSubject(const String& subjectName, int elevation, float sampleRate);
// Given two HRTFElevations, and an interpolation factor x: 0 -> 1, returns an interpolated HRTFElevation.
- static PassOwnPtr<HRTFElevation> createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, double x, double sampleRate);
+ static PassOwnPtr<HRTFElevation> createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate);
// Returns the list of left or right ear HRTFKernels for all the azimuths going from 0 to 360 degrees.
HRTFKernelList* kernelListL() { return m_kernelListL.get(); }
double elevationAngle() const { return m_elevationAngle; }
unsigned numberOfAzimuths() { return NumberOfTotalAzimuths; }
- double sampleRate() const { return m_sampleRate; }
+ float sampleRate() const { return m_sampleRate; }
// Returns the left and right kernels for the given azimuth index.
// The interpolated delays based on azimuthBlend: 0 -> 1 are returned in frameDelayL and frameDelayR.
// Valid values for azimuth are 0 -> 345 in 15 degree increments.
// Valid values for elevation are -45 -> +90 in 15 degree increments.
// Returns true on success.
- static bool calculateKernelsForAzimuthElevation(int azimuth, int elevation, double sampleRate, const String& subjectName,
+ static bool calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR);
// Given a specific azimuth and elevation angle, returns the left and right HRTFKernel in kernelL and kernelR.
// This method averages the measured response using symmetry of azimuth (for example by averaging the -30.0 and +30.0 azimuth responses).
// Returns true on success.
- static bool calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, double sampleRate, const String& subjectName,
+ static bool calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR);
private:
- HRTFElevation(PassOwnPtr<HRTFKernelList> kernelListL, PassOwnPtr<HRTFKernelList> kernelListR, int elevation, double sampleRate)
+ HRTFElevation(PassOwnPtr<HRTFKernelList> kernelListL, PassOwnPtr<HRTFKernelList> kernelListR, int elevation, float sampleRate)
: m_kernelListL(kernelListL)
, m_kernelListR(kernelListR)
, m_elevationAngle(elevation)
OwnPtr<HRTFKernelList> m_kernelListL;
OwnPtr<HRTFKernelList> m_kernelListR;
double m_elevationAngle;
- double m_sampleRate;
+ float m_sampleRate;
};
} // namespace WebCore
#include "AudioChannel.h"
#include "Biquad.h"
#include "FFTFrame.h"
+#include "FloatConversion.h"
#include <wtf/MathExtras.h>
using namespace std;
// This represents the initial delay before the most energetic part of the impulse response.
// The sample-frame delay is removed from the impulseP impulse response, and this value is returned.
// the length of the passed in AudioChannel must be a power of 2.
-static double extractAverageGroupDelay(AudioChannel* channel, size_t analysisFFTSize)
+static float extractAverageGroupDelay(AudioChannel* channel, size_t analysisFFTSize)
{
ASSERT(channel);
FFTFrame estimationFrame(analysisFFTSize);
estimationFrame.doFFT(impulseP);
- double frameDelay = estimationFrame.extractAverageGroupDelay();
+ float frameDelay = narrowPrecisionToFloat(estimationFrame.extractAverageGroupDelay());
estimationFrame.doInverseFFT(impulseP);
return frameDelay;
}
-HRTFKernel::HRTFKernel(AudioChannel* channel, size_t fftSize, double sampleRate, bool bassBoost)
- : m_frameDelay(0.0)
+HRTFKernel::HRTFKernel(AudioChannel* channel, size_t fftSize, float sampleRate, bool bassBoost)
+ : m_frameDelay(0)
, m_sampleRate(sampleRate)
{
ASSERT(channel);
}
// Interpolates two kernels with x: 0 -> 1 and returns the result.
-PassRefPtr<HRTFKernel> HRTFKernel::createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, double x)
+PassRefPtr<HRTFKernel> HRTFKernel::createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, float x)
{
ASSERT(kernel1 && kernel2);
if (!kernel1 || !kernel2)
return 0;
ASSERT(x >= 0.0 && x < 1.0);
- x = min(1.0, max(0.0, x));
+ x = min(1.0f, max(0.0f, x));
- double sampleRate1 = kernel1->sampleRate();
- double sampleRate2 = kernel2->sampleRate();
+ float sampleRate1 = kernel1->sampleRate();
+ float sampleRate2 = kernel2->sampleRate();
ASSERT(sampleRate1 == sampleRate2);
if (sampleRate1 != sampleRate2)
return 0;
- double frameDelay = (1.0 - x) * kernel1->frameDelay() + x * kernel2->frameDelay();
+ float frameDelay = (1 - x) * kernel1->frameDelay() + x * kernel2->frameDelay();
OwnPtr<FFTFrame> interpolatedFrame = FFTFrame::createInterpolatedFrame(*kernel1->fftFrame(), *kernel2->fftFrame(), x);
return HRTFKernel::create(interpolatedFrame.release(), frameDelay, sampleRate1);
public:
// Note: this is destructive on the passed in AudioChannel.
// The length of channel must be a power of two.
- static PassRefPtr<HRTFKernel> create(AudioChannel* channel, size_t fftSize, double sampleRate, bool bassBoost)
+ static PassRefPtr<HRTFKernel> create(AudioChannel* channel, size_t fftSize, float sampleRate, bool bassBoost)
{
return adoptRef(new HRTFKernel(channel, fftSize, sampleRate, bassBoost));
}
- static PassRefPtr<HRTFKernel> create(PassOwnPtr<FFTFrame> fftFrame, double frameDelay, double sampleRate)
+ static PassRefPtr<HRTFKernel> create(PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
{
return adoptRef(new HRTFKernel(fftFrame, frameDelay, sampleRate));
}
// Given two HRTFKernels, and an interpolation factor x: 0 -> 1, returns an interpolated HRTFKernel.
- static PassRefPtr<HRTFKernel> createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, double x);
+ static PassRefPtr<HRTFKernel> createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, float x);
FFTFrame* fftFrame() { return m_fftFrame.get(); }
size_t fftSize() const { return m_fftFrame->fftSize(); }
- double frameDelay() const { return m_frameDelay; }
+ float frameDelay() const { return m_frameDelay; }
- double sampleRate() const { return m_sampleRate; }
+ float sampleRate() const { return m_sampleRate; }
double nyquist() const { return 0.5 * sampleRate(); }
// Converts back into impulse-response form.
private:
// Note: this is destructive on the passed in AudioChannel.
- HRTFKernel(AudioChannel* channel, size_t fftSize, double sampleRate, bool bassBoost);
+ HRTFKernel(AudioChannel*, size_t fftSize, float sampleRate, bool bassBoost);
- HRTFKernel(PassOwnPtr<FFTFrame> fftFrame, double frameDelay, double sampleRate)
+ HRTFKernel(PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
: m_fftFrame(fftFrame)
, m_frameDelay(frameDelay)
, m_sampleRate(sampleRate)
}
OwnPtr<FFTFrame> m_fftFrame;
- double m_frameDelay;
- double m_sampleRate;
+ float m_frameDelay;
+ float m_sampleRate;
};
typedef Vector<RefPtr<HRTFKernel> > HRTFKernelList;
// We ASSERT the delay values used in process() with this value.
const double MaxDelayTimeSeconds = 0.002;
-HRTFPanner::HRTFPanner(double sampleRate)
+HRTFPanner::HRTFPanner(float sampleRate)
: Panner(PanningModelHRTF)
, m_sampleRate(sampleRate)
, m_isFirstRender(true)
{
}
-size_t HRTFPanner::fftSizeForSampleRate(double sampleRate)
+size_t HRTFPanner::fftSizeForSampleRate(float sampleRate)
{
// The HRTF impulse responses (loaded as audio resources) are 512 sample-frames @44.1KHz.
// Currently, we truncate the impulse responses to half this size, but an FFT-size of twice impulse response size is needed (for convolution).
class HRTFPanner : public Panner {
public:
- explicit HRTFPanner(double sampleRate);
+ explicit HRTFPanner(float sampleRate);
virtual ~HRTFPanner();
// Panner
virtual void reset();
size_t fftSize() { return fftSizeForSampleRate(m_sampleRate); }
- static size_t fftSizeForSampleRate(double sampleRate);
+ static size_t fftSizeForSampleRate(float sampleRate);
- double sampleRate() const { return m_sampleRate; }
+ float sampleRate() const { return m_sampleRate; }
private:
// Given an azimuth angle in the range -180 -> +180, returns the corresponding azimuth index for the database,
// and azimuthBlend which is an interpolation value from 0 -> 1.
int calculateDesiredAzimuthIndexAndBlend(double azimuth, double& azimuthBlend);
- double m_sampleRate;
+ float m_sampleRate;
// m_isFirstRender and m_azimuthIndex are used to avoid harshly changing from rendering at one azimuth angle to another angle very far away.
// Changing the azimuth gradually produces a smoother sound.
namespace WebCore {
-PassOwnPtr<Panner> Panner::create(PanningModel model, double sampleRate)
+PassOwnPtr<Panner> Panner::create(PanningModel model, float sampleRate)
{
OwnPtr<Panner> panner;
typedef unsigned PanningModel;
- static PassOwnPtr<Panner> create(PanningModel model, double sampleRate);
+ static PassOwnPtr<Panner> create(PanningModel, float sampleRate);
virtual ~Panner() { };
namespace WebCore {
-PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, double sampleRate)
+PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, float sampleRate)
{
// FIXME: the sampleRate parameter is ignored. It should be removed from the API.
OwnPtr<AudioBus> audioBus = PlatformSupport::loadPlatformAudioResource(name, sampleRate);
return AudioBus::createBySampleRateConverting(audioBus.get(), false, sampleRate);
}
-PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, double sampleRate)
+PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
{
// FIXME: the sampleRate parameter is ignored. It should be removed from the API.
OwnPtr<AudioBus> audioBus = PlatformSupport::decodeAudioFileData(static_cast<const char*>(data), dataSize, sampleRate);
namespace WebCore {
-PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, double sampleRate)
+PassOwnPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, float sampleRate)
{
// This method can be called from other than the main thread, so we need an auto-release pool.
NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
#include "AudioDestinationMac.h"
#include "AudioSourceProvider.h"
+#include "FloatConversion.h"
#include <CoreAudio/AudioHardware.h>
namespace WebCore {
const int kBufferSize = 128;
// Factory method: Mac-implementation
-PassOwnPtr<AudioDestination> AudioDestination::create(AudioSourceProvider& provider, double sampleRate)
+PassOwnPtr<AudioDestination> AudioDestination::create(AudioSourceProvider& provider, float sampleRate)
{
return adoptPtr(new AudioDestinationMac(provider, sampleRate));
}
-double AudioDestination::hardwareSampleRate()
+float AudioDestination::hardwareSampleRate()
{
// Determine the default output device's sample-rate.
AudioDeviceID deviceID = kAudioDeviceUnknown;
AudioObjectPropertyAddress defaultOutputDeviceAddress = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &defaultOutputDeviceAddress, 0, 0, &infoSize, (void*)&deviceID);
if (result)
- return 0.0; // error
+ return 0; // error
Float64 nominalSampleRate;
infoSize = sizeof(Float64);
AudioObjectPropertyAddress nominalSampleRateAddress = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
result = AudioObjectGetPropertyData(deviceID, &nominalSampleRateAddress, 0, 0, &infoSize, (void*)&nominalSampleRate);
if (result)
- return 0.0; // error
+ return 0; // error
- return nominalSampleRate;
+ return narrowPrecisionToFloat(nominalSampleRate);
}
-AudioDestinationMac::AudioDestinationMac(AudioSourceProvider& provider, double sampleRate)
+AudioDestinationMac::AudioDestinationMac(AudioSourceProvider& provider, float sampleRate)
: m_outputUnit(0)
, m_provider(provider)
, m_renderBus(2, kBufferSize, false)
class AudioDestinationMac : public AudioDestination {
public:
- AudioDestinationMac(AudioSourceProvider&, double sampleRate);
+ AudioDestinationMac(AudioSourceProvider&, float sampleRate);
virtual ~AudioDestinationMac();
virtual void start();
virtual void stop();
bool isPlaying() { return m_isPlaying; }
- double sampleRate() const { return m_sampleRate; }
+ float sampleRate() const { return m_sampleRate; }
private:
void configure();
AudioSourceProvider& m_provider;
AudioBus m_renderBus;
- double m_sampleRate;
+ float m_sampleRate;
bool m_isPlaying;
};
#include "AudioBus.h"
#include "AudioFileReader.h"
+#include "FloatConversion.h"
#include <CoreFoundation/CoreFoundation.h>
#include <CoreServices/CoreServices.h>
return audioFileReader->dataSize();
}
-PassOwnPtr<AudioBus> AudioFileReader::createBus(double sampleRate, bool mixToMono)
+PassOwnPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono)
{
if (!m_extAudioFileRef)
return nullptr;
// Create AudioBus where we'll put the PCM audio data
OwnPtr<AudioBus> audioBus = adoptPtr(new AudioBus(busChannelCount, numberOfFrames));
- audioBus->setSampleRate(m_clientDataFormat.mSampleRate); // save for later
+ audioBus->setSampleRate(narrowPrecisionToFloat(m_clientDataFormat.mSampleRate)); // save for later
// Only allocated in the mixToMono case
AudioFloatArray bufL;
return audioBus.release();
}
-PassOwnPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, double sampleRate)
+PassOwnPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, float sampleRate)
{
AudioFileReader reader(filePath);
return reader.createBus(sampleRate, mixToMono);
}
-PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, double sampleRate)
+PassOwnPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
{
AudioFileReader reader(data, dataSize);
return reader.createBus(sampleRate, mixToMono);
~AudioFileReader();
// Returns 0 if error
- PassOwnPtr<AudioBus> createBus(double sampleRate, bool mixToMono);
+ PassOwnPtr<AudioBus> createBus(float sampleRate, bool mixToMono);
const void* data() const { return m_data; }
size_t dataSize() const { return m_dataSize; }
m_threadID = 0;
}
-void AsyncAudioDecoder::decodeAsync(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
+void AsyncAudioDecoder::decodeAsync(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
{
ASSERT(isMainThread());
ASSERT(audioData);
}
}
-PassOwnPtr<AsyncAudioDecoder::DecodingTask> AsyncAudioDecoder::DecodingTask::create(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
+PassOwnPtr<AsyncAudioDecoder::DecodingTask> AsyncAudioDecoder::DecodingTask::create(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
{
return adoptPtr(new DecodingTask(audioData, sampleRate, successCallback, errorCallback));
}
-AsyncAudioDecoder::DecodingTask::DecodingTask(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
+AsyncAudioDecoder::DecodingTask::DecodingTask(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
: m_audioData(audioData)
, m_sampleRate(sampleRate)
, m_successCallback(successCallback)
~AsyncAudioDecoder();
// Must be called on the main thread.
- void decodeAsync(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
+ void decodeAsync(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
private:
class DecodingTask {
WTF_MAKE_NONCOPYABLE(DecodingTask);
public:
- static PassOwnPtr<DecodingTask> create(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
+ static PassOwnPtr<DecodingTask> create(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
void decode();
private:
- DecodingTask(ArrayBuffer* audioData, double sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
+ DecodingTask(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
ArrayBuffer* audioData() { return m_audioData.get(); }
- double sampleRate() const { return m_sampleRate; }
+ float sampleRate() const { return m_sampleRate; }
AudioBufferCallback* successCallback() { return m_successCallback.get(); }
AudioBufferCallback* errorCallback() { return m_errorCallback.get(); }
AudioBuffer* audioBuffer() { return m_audioBuffer.get(); }
void notifyComplete();
RefPtr<ArrayBuffer> m_audioData;
- double m_sampleRate;
+ float m_sampleRate;
RefPtr<AudioBufferCallback> m_successCallback;
RefPtr<AudioBufferCallback> m_errorCallback;
RefPtr<AudioBuffer> m_audioBuffer;
namespace WebCore {
-AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext* context, double sampleRate)
+AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext* context, float sampleRate)
: AudioNode(context, sampleRate)
{
addInput(adoptPtr(new AudioNodeInput(this)));
// AudioBasicProcessorNode is an AudioNode with one input and one output where the input and output have the same number of channels.
class AudioBasicProcessorNode : public AudioNode {
public:
- AudioBasicProcessorNode(AudioContext*, double sampleRate);
+ AudioBasicProcessorNode(AudioContext*, float sampleRate);
// AudioNode
virtual void process(size_t framesToProcess);
namespace WebCore {
-PassRefPtr<AudioBuffer> AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
+PassRefPtr<AudioBuffer> AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
{
return adoptRef(new AudioBuffer(numberOfChannels, numberOfFrames, sampleRate));
}
-PassRefPtr<AudioBuffer> AudioBuffer::createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, double sampleRate)
+PassRefPtr<AudioBuffer> AudioBuffer::createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
{
OwnPtr<AudioBus> bus = createBusFromInMemoryAudioFile(data, dataSize, mixToMono, sampleRate);
if (bus.get())
return 0;
}
-AudioBuffer::AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
+AudioBuffer::AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
: m_gain(1.0)
, m_sampleRate(sampleRate)
, m_length(numberOfFrames)
class AudioBuffer : public RefCounted<AudioBuffer> {
public:
- static PassRefPtr<AudioBuffer> create(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
+ static PassRefPtr<AudioBuffer> create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
// Returns 0 if data is not a valid audio file.
- static PassRefPtr<AudioBuffer> createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, double sampleRate);
+ static PassRefPtr<AudioBuffer> createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate);
// Format
size_t length() const { return m_length; }
double duration() const { return length() / sampleRate(); }
- double sampleRate() const { return m_sampleRate; }
+ float sampleRate() const { return m_sampleRate; }
// Channel data access
unsigned numberOfChannels() const { return m_channels.size(); }
void releaseMemory();
protected:
- AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
+ AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
AudioBuffer(AudioBus* bus);
double m_gain; // scalar gain
- double m_sampleRate;
+ float m_sampleRate;
size_t m_length;
Vector<RefPtr<Float32Array> > m_channels;
#include "AudioContext.h"
#include "AudioNodeOutput.h"
#include "Document.h"
+#include "FloatConversion.h"
#include "ScriptCallStack.h"
#include <algorithm>
#include <wtf/MainThread.h>
// to minimize linear interpolation aliasing.
const double MaxRate = 1024;
-PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, double sampleRate)
+PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, float sampleRate)
{
return adoptRef(new AudioBufferSourceNode(context, sampleRate));
}
-AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, double sampleRate)
+AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, float sampleRate)
: AudioSourceNode(context, sampleRate)
, m_buffer(0)
, m_isPlaying(false)
// Careful - this is a tryLock() and not an autolocker, so we must unlock() before every return.
if (m_processLock.tryLock()) {
// Check if it's time to start playing.
- double sampleRate = this->sampleRate();
+ float sampleRate = this->sampleRate();
double quantumStartTime = context()->currentTime();
double quantumEndTime = quantumStartTime + framesToProcess / sampleRate;
// If the end time is somewhere in the middle of this time quantum, then simply zero out the
// frames starting at the end time.
if (m_endTime != UnknownTime && m_endTime >= quantumStartTime && m_endTime < quantumEndTime) {
- unsigned zeroStartFrame = (m_endTime - quantumStartTime) * sampleRate;
- unsigned framesToZero = framesToProcess - zeroStartFrame;
+ size_t zeroStartFrame = narrowPrecisionToFloat((m_endTime - quantumStartTime) * sampleRate);
+ size_t framesToZero = framesToProcess - zeroStartFrame;
bool isSafe = zeroStartFrame < framesToProcess && framesToZero <= framesToProcess && zeroStartFrame + framesToZero <= framesToProcess;
ASSERT(isSafe);
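To make the frame arithmetic above concrete, here is a small illustrative sketch with made-up values and a plain static_cast standing in for narrowPrecisionToFloat():

    #include <cstddef>

    static void zeroStartFrameExample()
    {
        // Illustrative values, not taken from the patch:
        float sampleRate = 44100;
        double quantumStartTime = 1.0;  // start of this render quantum (seconds)
        double endTime = 1.001;         // note end falls 1 ms into the quantum
        size_t framesToProcess = 128;

        size_t zeroStartFrame = static_cast<size_t>((endTime - quantumStartTime) * sampleRate); // 44
        size_t framesToZero = framesToProcess - zeroStartFrame;                                 // 84
        // Frames [44, 128) of this quantum would then be written as silence.
    }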
double sampleL1 = sourceL[readIndex];
double sampleL2 = sourceL[readIndex2];
double sampleL = (1.0 - interpolationFactor) * sampleL1 + interpolationFactor * sampleL2;
- *destinationL++ = sampleL;
+ *destinationL++ = narrowPrecisionToFloat(sampleL);
if (isStereo) {
double sampleR1 = sourceR[readIndex];
double sampleR2 = sourceR[readIndex2];
double sampleR = (1.0 - interpolationFactor) * sampleR1 + interpolationFactor * sampleR2;
- *destinationR++ = sampleR;
+ *destinationR++ = narrowPrecisionToFloat(sampleR);
}
virtualReadIndex += pitchRate;
class AudioBufferSourceNode : public AudioSourceNode {
public:
- static PassRefPtr<AudioBufferSourceNode> create(AudioContext*, double sampleRate);
+ static PassRefPtr<AudioBufferSourceNode> create(AudioContext*, float sampleRate);
virtual ~AudioBufferSourceNode();
void setPannerNode(PassRefPtr<AudioPannerNode> pannerNode) { m_pannerNode = pannerNode; }
private:
- AudioBufferSourceNode(AudioContext*, double sampleRate);
+ AudioBufferSourceNode(AudioContext*, float sampleRate);
void renderFromBuffer(AudioBus*, unsigned destinationFrameOffset, size_t numberOfFrames);
// It can easily be increased to support more if the web audio specification is updated.
const unsigned NumberOfInputs = 6;
-AudioChannelMerger::AudioChannelMerger(AudioContext* context, double sampleRate)
+AudioChannelMerger::AudioChannelMerger(AudioContext* context, float sampleRate)
: AudioNode(context, sampleRate)
{
// Create a fixed number of inputs (able to handle the maximum number of channels we deal with).
class AudioChannelMerger : public AudioNode {
public:
- static PassRefPtr<AudioChannelMerger> create(AudioContext* context, double sampleRate)
+ static PassRefPtr<AudioChannelMerger> create(AudioContext* context, float sampleRate)
{
return adoptRef(new AudioChannelMerger(context, sampleRate));
}
virtual void checkNumberOfChannelsForInput(AudioNodeInput*);
private:
- AudioChannelMerger(AudioContext*, double sampleRate);
+ AudioChannelMerger(AudioContext*, float sampleRate);
};
} // namespace WebCore
// It can easily be increased to support more if the web audio specification is updated.
const unsigned NumberOfOutputs = 6;
-AudioChannelSplitter::AudioChannelSplitter(AudioContext* context, double sampleRate)
+AudioChannelSplitter::AudioChannelSplitter(AudioContext* context, float sampleRate)
: AudioNode(context, sampleRate)
{
addInput(adoptPtr(new AudioNodeInput(this)));
class AudioChannelSplitter : public AudioNode {
public:
- static PassRefPtr<AudioChannelSplitter> create(AudioContext* context, double sampleRate)
+ static PassRefPtr<AudioChannelSplitter> create(AudioContext* context, float sampleRate)
{
return adoptRef(new AudioChannelSplitter(context, sampleRate));
}
virtual void reset();
private:
- AudioChannelSplitter(AudioContext*, double sampleRate);
+ AudioChannelSplitter(AudioContext*, float sampleRate);
};
} // namespace WebCore
namespace {
-bool isSampleRateRangeGood(double sampleRate)
+bool isSampleRateRangeGood(float sampleRate)
{
return sampleRate >= 22050 && sampleRate <= 96000;
}
return adoptRef(new AudioContext(document));
}
-PassRefPtr<AudioContext> AudioContext::createOfflineContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate, ExceptionCode& ec)
+PassRefPtr<AudioContext> AudioContext::createOfflineContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
{
ASSERT(document);
}
// Constructor for offline (non-realtime) rendering.
-AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
+AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
: ActiveDOMObject(document, this)
, m_isInitialized(false)
, m_isAudioThreadFinished(false)
m_allocatedBuffers.append(buffer);
}
-PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
+PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
{
if (!isSampleRateRangeGood(sampleRate) || numberOfChannels > 10 || !numberOfFrames)
return 0;
static PassRefPtr<AudioContext> create(Document*);
// Create an AudioContext for offline (non-realtime) rendering.
- static PassRefPtr<AudioContext> createOfflineContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate, ExceptionCode&);
+ static PassRefPtr<AudioContext> createOfflineContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
virtual ~AudioContext();
AudioDestinationNode* destination() { return m_destinationNode.get(); }
double currentTime() { return m_destinationNode->currentTime(); }
- double sampleRate() { return m_destinationNode->sampleRate(); }
+ float sampleRate() { return m_destinationNode->sampleRate(); }
- PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
+ PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono);
// Asynchronous audio file data decoding.
private:
AudioContext(Document*);
- AudioContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate);
+ AudioContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
void constructCommon();
void lazyInitialize();
namespace WebCore {
-AudioDestinationNode::AudioDestinationNode(AudioContext* context, double sampleRate)
+AudioDestinationNode::AudioDestinationNode(AudioContext* context, float sampleRate)
: AudioNode(context, sampleRate)
, m_currentTime(0.0)
{
class AudioDestinationNode : public AudioNode, public AudioSourceProvider {
public:
- AudioDestinationNode(AudioContext*, double sampleRate);
+ AudioDestinationNode(AudioContext*, float sampleRate);
virtual ~AudioDestinationNode();
// AudioNode
double currentTime() { return m_currentTime; }
- virtual double sampleRate() const = 0;
+ virtual float sampleRate() const = 0;
virtual unsigned numberOfChannels() const { return 2; } // FIXME: update when multi-channel (more than stereo) is supported
namespace WebCore {
-AudioGainNode::AudioGainNode(AudioContext* context, double sampleRate)
+AudioGainNode::AudioGainNode(AudioContext* context, float sampleRate)
: AudioNode(context, sampleRate)
, m_lastGain(1.0)
, m_sampleAccurateGainValues(AudioNode::ProcessingSizeInFrames) // FIXME: can probably share temp buffer in context
class AudioGainNode : public AudioNode {
public:
- static PassRefPtr<AudioGainNode> create(AudioContext* context, double sampleRate)
+ static PassRefPtr<AudioGainNode> create(AudioContext* context, float sampleRate)
{
return adoptRef(new AudioGainNode(context, sampleRate));
}
AudioGain* gain() { return m_gain.get(); }
private:
- AudioGainNode(AudioContext*, double sampleRate);
+ AudioGainNode(AudioContext*, float sampleRate);
double m_lastGain; // for de-zippering
RefPtr<AudioGain> m_gain;
AudioListener::AudioListener()
: m_position(0, 0, 0)
- , m_orientation(0.0, 0.0, -1.0)
- , m_upVector(0.0, 1.0, 0.0)
+ , m_orientation(0, 0, -1)
+ , m_upVector(0, 1, 0)
, m_velocity(0, 0, 0)
- , m_dopplerFactor(1.0)
+ , m_dopplerFactor(1)
, m_speedOfSound(343.3)
{
}
}
// Position
- void setPosition(double x, double y, double z) { setPosition(FloatPoint3D(x, y, z)); }
+ void setPosition(float x, float y, float z) { setPosition(FloatPoint3D(x, y, z)); }
void setPosition(const FloatPoint3D &position) { m_position = position; }
const FloatPoint3D& position() const { return m_position; }
// Orientation
- void setOrientation(double x, double y, double z, double upX, double upY, double upZ)
+ void setOrientation(float x, float y, float z, float upX, float upY, float upZ)
{
setOrientation(FloatPoint3D(x, y, z));
setUpVector(FloatPoint3D(upX, upY, upZ));
const FloatPoint3D& upVector() const { return m_upVector; }
// Velocity
- void setVelocity(double x, double y, double z) { setVelocity(FloatPoint3D(x, y, z)); }
+ void setVelocity(float x, float y, float z) { setVelocity(FloatPoint3D(x, y, z)); }
void setVelocity(const FloatPoint3D &velocity) { m_velocity = velocity; }
const FloatPoint3D& velocity() const { return m_velocity; }
namespace WebCore {
-AudioNode::AudioNode(AudioContext* context, double sampleRate)
+AudioNode::AudioNode(AudioContext* context, float sampleRate)
: m_isInitialized(false)
, m_nodeType(NodeTypeUnknown)
, m_context(context)
public:
enum { ProcessingSizeInFrames = 128 };
- AudioNode(AudioContext*, double sampleRate);
+ AudioNode(AudioContext*, float sampleRate);
virtual ~AudioNode();
AudioContext* context() { return m_context.get(); }
bool connect(AudioNode* destination, unsigned outputIndex = 0, unsigned inputIndex = 0);
bool disconnect(unsigned outputIndex = 0);
- double sampleRate() const { return m_sampleRate; }
+ float sampleRate() const { return m_sampleRate; }
// processIfNecessary() is called by our output(s) when the rendering graph needs this AudioNode to process.
// This method ensures that the AudioNode will only process once per rendering time quantum even if it's called repeatedly.
volatile bool m_isInitialized;
NodeType m_nodeType;
RefPtr<AudioContext> m_context;
- double m_sampleRate;
+ float m_sampleRate;
Vector<OwnPtr<AudioNodeInput> > m_inputs;
Vector<OwnPtr<AudioNodeOutput> > m_outputs;
x = 0.0;
}
-AudioPannerNode::AudioPannerNode(AudioContext* context, double sampleRate)
+AudioPannerNode::AudioPannerNode(AudioContext* context, float sampleRate)
: AudioNode(context, sampleRate)
, m_panningModel(Panner::PanningModelHRTF)
, m_lastGain(-1.0)
FloatPoint3D up = listenerRight.cross(listenerFrontNorm);
- double upProjection = sourceListener.dot(up);
+ float upProjection = sourceListener.dot(up);
FloatPoint3D projectedSource = sourceListener - upProjection * up;
projectedSource.normalize();
SOUNDFIELD = 2,
};
- static PassRefPtr<AudioPannerNode> create(AudioContext* context, double sampleRate)
+ static PassRefPtr<AudioPannerNode> create(AudioContext* context, float sampleRate)
{
return adoptRef(new AudioPannerNode(context, sampleRate));
}
AudioGain* coneGain() { return m_coneGain.get(); }
private:
- AudioPannerNode(AudioContext*, double sampleRate);
+ AudioPannerNode(AudioContext*, float sampleRate);
// Returns the combined distance and cone gain attenuation.
float distanceConeGain();
#include "AudioNode.h"
#include "AudioUtilities.h"
+#include "FloatConversion.h"
#include <wtf/MathExtras.h>
namespace WebCore {
// Update value for timeline.
if (context() && context()->isAudioThread()) {
bool hasValue;
- float timelineValue = m_timeline.valueForContextTime(context(), m_value, hasValue);
+ float timelineValue = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), hasValue);
if (hasValue)
m_value = timelineValue;
}
- return static_cast<float>(m_value);
+ return narrowPrecisionToFloat(m_value);
}
void AudioParam::setValue(float value)
float AudioParam::smoothedValue()
{
- return static_cast<float>(m_smoothedValue);
+ return narrowPrecisionToFloat(m_smoothedValue);
}
bool AudioParam::smooth()
// Smoothing effectively is performed by the timeline.
bool useTimelineValue = false;
if (context())
- m_value = m_timeline.valueForContextTime(context(), m_value, useTimelineValue);
+ m_value = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), useTimelineValue);
if (m_smoothedValue == m_value) {
// Smoothed value has already approached and snapped to value.
// Calculate values for this render quantum.
// Normally numberOfValues will equal AudioNode::ProcessingSizeInFrames (the render quantum size).
float sampleRate = context()->sampleRate();
- float startTime = context()->currentTime();
+ float startTime = narrowPrecisionToFloat(context()->currentTime());
float endTime = startTime + numberOfValues / sampleRate;
// Note we're running control rate at the sample-rate.
// Pass in the current value as default value.
- m_value = m_timeline.valuesForTimeRange(startTime, endTime, m_value, values, numberOfValues, sampleRate, sampleRate);
+ m_value = m_timeline.valuesForTimeRange(startTime, endTime, narrowPrecisionToFloat(m_value), values, numberOfValues, sampleRate, sampleRate);
}
} // namespace WebCore
#include "AudioParamTimeline.h"
#include "AudioUtilities.h"
+#include "FloatConversion.h"
#include <algorithm>
#include <wtf/MathExtras.h>
// Ask for just a single value.
float value;
float sampleRate = context->sampleRate();
- float startTime = context->currentTime();
- float endTime = startTime + 1.1 / sampleRate; // time just beyond one sample-frame
+ float startTime = narrowPrecisionToFloat(context->currentTime());
+ float endTime = startTime + 1.1f / sampleRate; // time just beyond one sample-frame
float controlRate = sampleRate / AudioNode::ProcessingSizeInFrames; // one parameter change per render quantum
value = valuesForTimeRange(startTime, endTime, defaultValue, &value, 1, sampleRate, controlRate);
}
// Returns the rounded down integer sample-frame for the time and sample-rate.
-static unsigned timeToSampleFrame(double time, double sampleRate)
+static unsigned timeToSampleFrame(double time, float sampleRate)
{
double k = 0.5 / sampleRate;
return static_cast<unsigned>((time + k) * sampleRate);
}
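A small illustration of why the half-sample offset k is added before truncating, presumably to keep times that should land exactly on a frame boundary from slipping down a frame through floating-point error (values are illustrative):

    static unsigned timeToSampleFrameExample()
    {
        float sampleRate = 44100;
        double time = 100.0 / sampleRate; // a time that should be exactly frame 100
        double k = 0.5 / sampleRate;      // half-sample nudge, as in the patch
        // Without k, rounding error in (time * sampleRate) could yield 99.999...,
        // which would truncate to frame 99; with k it truncates to 100.
        return static_cast<unsigned>((time + k) * sampleRate); // 100
    }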
// Maintain a running time and index for writing the values buffer.
- double currentTime = startTime;
+ float currentTime = startTime;
unsigned writeIndex = 0;
// If first event is after startTime then fill initial part of values buffer with defaultValue
values[writeIndex] = value;
} else {
// Interpolate in log space.
- value1 = log2(value1);
- value2 = log2(value2);
+ value1 = log2f(value1);
+ value2 = log2f(value2);
// FIXME: optimize to not use pow() in inner loop, this is just a simple exponential ramp.
for (; writeIndex < fillToFrame; ++writeIndex) {
class AudioSourceNode : public AudioNode {
public:
- AudioSourceNode(AudioContext* context, double sampleRate)
+ AudioSourceNode(AudioContext* context, float sampleRate)
: AudioNode(context, sampleRate)
{
}
namespace WebCore {
-BiquadFilterNode::BiquadFilterNode(AudioContext* context, double sampleRate)
+BiquadFilterNode::BiquadFilterNode(AudioContext* context, float sampleRate)
: AudioBasicProcessorNode(context, sampleRate)
{
// Initially setup as lowpass filter.
ALLPASS = 7
};
- static PassRefPtr<BiquadFilterNode> create(AudioContext* context, double sampleRate)
+ static PassRefPtr<BiquadFilterNode> create(AudioContext* context, float sampleRate)
{
return adoptRef(new BiquadFilterNode(context, sampleRate));
}
AudioParam* gain() { return biquadProcessor()->parameter3(); }
private:
- BiquadFilterNode(AudioContext*, double sampleRate);
+ BiquadFilterNode(AudioContext*, float sampleRate);
BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); }
};
namespace WebCore {
-BiquadProcessor::BiquadProcessor(double sampleRate, size_t numberOfChannels, bool autoInitialize)
+BiquadProcessor::BiquadProcessor(float sampleRate, size_t numberOfChannels, bool autoInitialize)
: AudioDSPKernelProcessor(sampleRate, numberOfChannels)
, m_type(LowPass)
, m_parameter1(0)
initialize();
}
-BiquadProcessor::BiquadProcessor(FilterType type, double sampleRate, size_t numberOfChannels, bool autoInitialize)
+BiquadProcessor::BiquadProcessor(FilterType type, float sampleRate, size_t numberOfChannels, bool autoInitialize)
: AudioDSPKernelProcessor(sampleRate, numberOfChannels)
, m_type(type)
, m_parameter1(0)
Allpass = 7
};
- BiquadProcessor(double sampleRate, size_t numberOfChannels, bool autoInitialize);
+ BiquadProcessor(float sampleRate, size_t numberOfChannels, bool autoInitialize);
// Old constructor used by deprecated LowPass2FilterNode and HighPass2FilterNode
- BiquadProcessor(FilterType, double sampleRate, size_t numberOfChannels, bool autoInitialize = true);
+ BiquadProcessor(FilterType, float sampleRate, size_t numberOfChannels, bool autoInitialize = true);
virtual ~BiquadProcessor();
namespace WebCore {
-ConvolverNode::ConvolverNode(AudioContext* context, double sampleRate)
+ConvolverNode::ConvolverNode(AudioContext* context, float sampleRate)
: AudioNode(context, sampleRate)
{
addInput(adoptPtr(new AudioNodeInput(this)));
class ConvolverNode : public AudioNode {
public:
- static PassRefPtr<ConvolverNode> create(AudioContext* context, double sampleRate)
+ static PassRefPtr<ConvolverNode> create(AudioContext* context, float sampleRate)
{
return adoptRef(new ConvolverNode(context, sampleRate));
}
AudioBuffer* buffer();
private:
- ConvolverNode(AudioContext*, double sampleRate);
+ ConvolverNode(AudioContext*, float sampleRate);
OwnPtr<Reverb> m_reverb;
RefPtr<AudioBuffer> m_buffer;
if (isInitialized())
return;
- double hardwareSampleRate = AudioDestination::hardwareSampleRate();
+ float hardwareSampleRate = AudioDestination::hardwareSampleRate();
#ifndef NDEBUG
fprintf(stderr, ">>>> hardwareSampleRate = %f\n", hardwareSampleRate);
#endif
virtual void initialize();
virtual void uninitialize();
- double sampleRate() const { return m_destination->sampleRate(); }
+ float sampleRate() const { return m_destination->sampleRate(); }
virtual void startRendering();
using namespace std;
-const double DefaultMaxDelayTime = 1.0;
-const double SmoothingTimeConstant = 0.020; // 20ms
+const float DefaultMaxDelayTime = 1;
+const float SmoothingTimeConstant = 0.020f; // 20ms
namespace WebCore {
m_smoothingRate = AudioUtilities::discreteTimeConstantForSampleRate(SmoothingTimeConstant, processor->sampleRate());
}
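For reference, discreteTimeConstantForSampleRate() is assumed here to compute the usual one-pole smoothing coefficient; this is a sketch of that idea, not the patched AudioUtilities code:

    #include <cmath>

    // Assumed form of the helper; see AudioUtilities.cpp for the real definition.
    static float discreteTimeConstantForSampleRateSketch(float timeConstant, float sampleRate)
    {
        // Per-sample coefficient of a one-pole smoother that reaches ~63% of a
        // step change after timeConstant seconds.
        return 1 - expf(-1 / (sampleRate * timeConstant));
    }
    // For the 20 ms SmoothingTimeConstant above at 44.1 kHz this is about 0.00113.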
-DelayDSPKernel::DelayDSPKernel(double maxDelayTime, double sampleRate)
+DelayDSPKernel::DelayDSPKernel(double maxDelayTime, float sampleRate)
: AudioDSPKernel(sampleRate)
, m_maxDelayTime(maxDelayTime)
, m_writeIndex(0)
if (!source || !destination)
return;
- double sampleRate = this->sampleRate();
+ float sampleRate = this->sampleRate();
double delayTime = delayProcessor() ? delayProcessor()->delayTime()->value() : m_desiredDelayFrames / sampleRate;
// Make sure the delay time is in a valid range.
class DelayDSPKernel : public AudioDSPKernel {
public:
DelayDSPKernel(DelayProcessor*);
- DelayDSPKernel(double maxDelayTime, double sampleRate);
+ DelayDSPKernel(double maxDelayTime, float sampleRate);
virtual void process(const float* source, float* destination, size_t framesToProcess);
virtual void reset();
namespace WebCore {
-DelayNode::DelayNode(AudioContext* context, double sampleRate)
+DelayNode::DelayNode(AudioContext* context, float sampleRate)
: AudioBasicProcessorNode(context, sampleRate)
{
m_processor = adoptPtr(new DelayProcessor(sampleRate, 1));
class DelayNode : public AudioBasicProcessorNode {
public:
- static PassRefPtr<DelayNode> create(AudioContext* context, double sampleRate)
+ static PassRefPtr<DelayNode> create(AudioContext* context, float sampleRate)
{
return adoptRef(new DelayNode(context, sampleRate));
}
AudioParam* delayTime();
private:
- DelayNode(AudioContext*, double sampleRate);
+ DelayNode(AudioContext*, float sampleRate);
DelayProcessor* delayProcessor() { return static_cast<DelayProcessor*>(processor()); }
};
namespace WebCore {
-DelayProcessor::DelayProcessor(double sampleRate, unsigned numberOfChannels)
+DelayProcessor::DelayProcessor(float sampleRate, unsigned numberOfChannels)
: AudioDSPKernelProcessor(sampleRate, numberOfChannels)
{
m_delayTime = AudioParam::create("delayTime", 0.0, 0.0, 1.0);
class DelayProcessor : public AudioDSPKernelProcessor {
public:
- DelayProcessor(double sampleRate, unsigned numberOfChannels);
+ DelayProcessor(float sampleRate, unsigned numberOfChannels);
virtual ~DelayProcessor();
virtual PassOwnPtr<AudioDSPKernel> createKernel();
namespace WebCore {
-DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* context, double sampleRate)
+DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* context, float sampleRate)
: AudioNode(context, sampleRate)
{
addInput(adoptPtr(new AudioNodeInput(this)));
class DynamicsCompressorNode : public AudioNode {
public:
- static PassRefPtr<DynamicsCompressorNode> create(AudioContext* context, double sampleRate)
+ static PassRefPtr<DynamicsCompressorNode> create(AudioContext* context, float sampleRate)
{
return adoptRef(new DynamicsCompressorNode(context, sampleRate));
}
virtual void uninitialize();
private:
- DynamicsCompressorNode(AudioContext*, double sampleRate);
+ DynamicsCompressorNode(AudioContext*, float sampleRate);
OwnPtr<DynamicsCompressor> m_dynamicsCompressor;
};
namespace WebCore {
-HighPass2FilterNode::HighPass2FilterNode(AudioContext* context, double sampleRate)
+HighPass2FilterNode::HighPass2FilterNode(AudioContext* context, float sampleRate)
: AudioBasicProcessorNode(context, sampleRate)
{
m_processor = adoptPtr(new BiquadProcessor(BiquadProcessor::HighPass, sampleRate, 1, false));
class HighPass2FilterNode : public AudioBasicProcessorNode {
public:
- static PassRefPtr<HighPass2FilterNode> create(AudioContext* context, double sampleRate)
+ static PassRefPtr<HighPass2FilterNode> create(AudioContext* context, float sampleRate)
{
return adoptRef(new HighPass2FilterNode(context, sampleRate));
}
AudioParam* resonance() { return biquadProcessor()->parameter2(); }
private:
- HighPass2FilterNode(AudioContext*, double sampleRate);
+ HighPass2FilterNode(AudioContext*, float sampleRate);
BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); }
};
const size_t DefaultBufferSize = 4096;
-PassRefPtr<JavaScriptAudioNode> JavaScriptAudioNode::create(AudioContext* context, double sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
+PassRefPtr<JavaScriptAudioNode> JavaScriptAudioNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
{
return adoptRef(new JavaScriptAudioNode(context, sampleRate, bufferSize, numberOfInputs, numberOfOutputs));
}
-JavaScriptAudioNode::JavaScriptAudioNode(AudioContext* context, double sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
+JavaScriptAudioNode::JavaScriptAudioNode(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs)
: AudioNode(context, sampleRate)
, m_doubleBufferIndex(0)
, m_doubleBufferIndexForEvent(0)
if (isInitialized())
return;
- double sampleRate = context()->sampleRate();
+ float sampleRate = context()->sampleRate();
// Create double buffers on both the input and output sides.
// These AudioBuffers will be directly accessed in the main thread by JavaScript.
// This value controls how frequently the onaudioprocess event handler is called and how many sample-frames need to be processed each call.
// Lower numbers for bufferSize will result in a lower (better) latency. Higher numbers will be necessary to avoid audio breakup and glitches.
// The value chosen must carefully balance between latency and audio quality.
- static PassRefPtr<JavaScriptAudioNode> create(AudioContext*, double sampleRate, size_t bufferSize, unsigned numberOfInputs = 1, unsigned numberOfOutputs = 1);
+ static PassRefPtr<JavaScriptAudioNode> create(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputs = 1, unsigned numberOfOutputs = 1);
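To put numbers on the latency comment above, a small illustrative calculation (values assumed, not part of the patch):

    #include <cstddef>

    static double callbackIntervalExample()
    {
        // Illustrative only: DefaultBufferSize (4096 frames) at a 44.1 kHz context.
        float sampleRate = 44100;
        size_t bufferSize = 4096;
        // Each onaudioprocess call then covers 4096 / 44100 ~= 93 ms of audio,
        // which is roughly the latency added; 256 frames would give ~5.8 ms.
        return bufferSize / static_cast<double>(sampleRate);
    }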
virtual ~JavaScriptAudioNode();
using AudioNode::deref;
private:
- JavaScriptAudioNode(AudioContext*, double sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs);
+ JavaScriptAudioNode(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputs, unsigned numberOfOutputs);
static void fireProcessEventDispatch(void* userData);
void fireProcessEvent();
namespace WebCore {
-LowPass2FilterNode::LowPass2FilterNode(AudioContext* context, double sampleRate)
+LowPass2FilterNode::LowPass2FilterNode(AudioContext* context, float sampleRate)
: AudioBasicProcessorNode(context, sampleRate)
{
m_processor = adoptPtr(new BiquadProcessor(BiquadProcessor::LowPass, sampleRate, 1, false));
class LowPass2FilterNode : public AudioBasicProcessorNode {
public:
- static PassRefPtr<LowPass2FilterNode> create(AudioContext* context, double sampleRate)
+ static PassRefPtr<LowPass2FilterNode> create(AudioContext* context, float sampleRate)
{
return adoptRef(new LowPass2FilterNode(context, sampleRate));
}
AudioParam* resonance() { return biquadProcessor()->parameter2(); }
private:
- LowPass2FilterNode(AudioContext*, double sampleRate);
+ LowPass2FilterNode(AudioContext*, float sampleRate);
BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); }
};
virtual void initialize();
virtual void uninitialize();
- double sampleRate() const { return m_renderTarget->sampleRate(); }
+ float sampleRate() const { return m_renderTarget->sampleRate(); }
void startRendering();
namespace WebCore {
-RealtimeAnalyserNode::RealtimeAnalyserNode(AudioContext* context, double sampleRate)
+RealtimeAnalyserNode::RealtimeAnalyserNode(AudioContext* context, float sampleRate)
: AudioNode(context, sampleRate)
{
addInput(adoptPtr(new AudioNodeInput(this)));
class RealtimeAnalyserNode : public AudioNode {
public:
- static PassRefPtr<RealtimeAnalyserNode> create(AudioContext* context, double sampleRate)
+ static PassRefPtr<RealtimeAnalyserNode> create(AudioContext* context, float sampleRate)
{
return adoptRef(new RealtimeAnalyserNode(context, sampleRate));
}
void getByteTimeDomainData(Uint8Array* array) { m_analyser.getByteTimeDomainData(array); }
private:
- RealtimeAnalyserNode(AudioContext*, double sampleRate);
+ RealtimeAnalyserNode(AudioContext*, float sampleRate);
RealtimeAnalyser m_analyser;
};
const float input = source[i];
// Calculate an index based on input -1 -> +1 with 0 being at the center of the curve data.
- int index = curveLength * 0.5 * (input + 1);
+ int index = (curveLength * (input + 1)) / 2;
// Clip index to the input range of the curve.
// This takes care of input outside of nominal range -1 -> +1
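A quick worked example of the index mapping in the new expression, with an assumed curve length:

    static int waveShaperIndexExample(float input)
    {
        int curveLength = 1024; // illustrative curve size
        // input = -1 -> 0, input = 0 -> 512 (center of the curve data),
        // input = +1 -> 1024, which the clipping step clamps back into range.
        int index = (curveLength * (input + 1)) / 2;
        return index;
    }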
namespace WebCore {
-WaveShaperProcessor::WaveShaperProcessor(double sampleRate, size_t numberOfChannels)
+WaveShaperProcessor::WaveShaperProcessor(float sampleRate, size_t numberOfChannels)
: AudioDSPKernelProcessor(sampleRate, numberOfChannels)
{
}
class WaveShaperProcessor : public AudioDSPKernelProcessor {
public:
- WaveShaperProcessor(double sampleRate, size_t numberOfChannels);
+ WaveShaperProcessor(float sampleRate, size_t numberOfChannels);
virtual ~WaveShaperProcessor();
+2011-10-05 Jer Noble <jer.noble@apple.com>
+
+ WEB_AUDIO does not compile on Leopard 32-bit.
+ https://bugs.webkit.org/show_bug.cgi?id=69292
+
+ Reviewed by Simon Fraser.
+
+        Platform-independent portions of WEB_AUDIO have changed from double to float, and
+ platform-specific subclasses must change as well.
+
+ * src/AudioDestinationChromium.cpp:
+ (WebCore::AudioDestination::create):
+ (WebCore::AudioDestinationChromium::AudioDestinationChromium):
+ (WebCore::AudioDestination::hardwareSampleRate):
+ * src/AudioDestinationChromium.h:
+ (WebCore::AudioDestinationChromium::sampleRate):
+
2011-10-05 James Robinson <jamesr@chromium.org>
[chromium] Hook WebCompositor interface for input events up to the compositor proper
const unsigned numberOfChannels = 2;
// Factory method: Chromium-implementation
-PassOwnPtr<AudioDestination> AudioDestination::create(AudioSourceProvider& provider, double sampleRate)
+PassOwnPtr<AudioDestination> AudioDestination::create(AudioSourceProvider& provider, float sampleRate)
{
return adoptPtr(new AudioDestinationChromium(provider, sampleRate));
}
-AudioDestinationChromium::AudioDestinationChromium(AudioSourceProvider& provider, double sampleRate)
+AudioDestinationChromium::AudioDestinationChromium(AudioSourceProvider& provider, float sampleRate)
: m_provider(provider)
, m_renderBus(numberOfChannels, renderBufferSize, false)
, m_sampleRate(sampleRate)
}
}
-double AudioDestination::hardwareSampleRate()
+float AudioDestination::hardwareSampleRate()
{
- return webKitPlatformSupport()->audioHardwareSampleRate();
+ return static_cast<float>(webKitPlatformSupport()->audioHardwareSampleRate());
}
// Pulls on our provider to get the rendered audio stream.
class AudioDestinationChromium : public AudioDestination, public WebKit::WebAudioDevice::RenderCallback {
public:
- AudioDestinationChromium(AudioSourceProvider&, double sampleRate);
+ AudioDestinationChromium(AudioSourceProvider&, float sampleRate);
virtual ~AudioDestinationChromium();
virtual void start();
virtual void stop();
bool isPlaying() { return m_isPlaying; }
- double sampleRate() const { return m_sampleRate; }
+ float sampleRate() const { return m_sampleRate; }
// WebKit::WebAudioDevice::RenderCallback
virtual void render(const WebKit::WebVector<float*>& audioData, size_t numberOfFrames);
private:
AudioSourceProvider& m_provider;
AudioBus m_renderBus;
- double m_sampleRate;
+ float m_sampleRate;
bool m_isPlaying;
OwnPtr<WebKit::WebAudioDevice> m_audioDevice;
size_t m_callbackBufferSize;