+2012-02-22 Raymond Liu <raymond.liu@intel.com>
+
+ Have the DynamicsCompressorNode support multi-channel data
+ https://bugs.webkit.org/show_bug.cgi?id=77856
+
+ Reviewed by Chris Rogers.
+
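+ Generalize DynamicsCompressor and DynamicsCompressorKernel from the fixed
+ left/right (stereo) code path to per-channel emphasis filter packs and
+ pre-delay buffers, sized via setNumberOfChannels(). DynamicsCompressorNode
+ still creates its output with two channels by default.
+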
+ * platform/audio/DynamicsCompressor.cpp:
+ (WebCore::DynamicsCompressor::DynamicsCompressor):
+ (WebCore::DynamicsCompressor::setEmphasisStageParameters):
+ (WebCore::DynamicsCompressor::process):
+ (WebCore::DynamicsCompressor::reset):
+ (WebCore::DynamicsCompressor::setNumberOfChannels):
+ (WebCore):
+ * platform/audio/DynamicsCompressor.h:
+ (DynamicsCompressor):
+ * platform/audio/DynamicsCompressorKernel.cpp:
+ (WebCore::DynamicsCompressorKernel::DynamicsCompressorKernel):
+ (WebCore::DynamicsCompressorKernel::setNumberOfChannels):
+ (WebCore):
+ (WebCore::DynamicsCompressorKernel::setPreDelayTime):
+ (WebCore::DynamicsCompressorKernel::process):
+ (WebCore::DynamicsCompressorKernel::reset):
+ * platform/audio/DynamicsCompressorKernel.h:
+ (DynamicsCompressorKernel):
+ * webaudio/DynamicsCompressorNode.cpp:
+ (WebCore::DynamicsCompressorNode::DynamicsCompressorNode):
+ (WebCore::DynamicsCompressorNode::initialize):
+
2012-02-22 Bear Travis <betravis@adobe.com>
Not correctly recalculating layout for elements within nested SVG elements
using namespace AudioUtilities;
-DynamicsCompressor::DynamicsCompressor(bool isStereo, float sampleRate)
- : m_isStereo(isStereo)
- , m_sampleRate(sampleRate)
- , m_compressor(sampleRate)
+DynamicsCompressor::DynamicsCompressor(float sampleRate, unsigned numberOfChannels)
+ : m_numberOfChannels(numberOfChannels)
+ , m_sampleRate(sampleRate)
+ , m_compressor(sampleRate, numberOfChannels)
{
// Uninitialized state - for parameter recalculation.
m_lastFilterStageRatio = -1;
m_lastAnchor = -1;
m_lastFilterStageGain = -1;
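+ // Allocate the per-channel filter packs and channel pointer arrays before the parameters are initialized.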
+ setNumberOfChannels(numberOfChannels);
initializeParameters();
}
float r1 = expf(-f1 * piFloat);
float r2 = expf(-f2 * piFloat);
- // Set pre-filter zero and pole to create an emphasis filter.
- m_preFilter[stageIndex].setZero(r1);
- m_preFilter[stageIndex].setPole(r2);
- m_preFilterR[stageIndex].setZero(r1);
- m_preFilterR[stageIndex].setPole(r2);
-
- // Set post-filter with zero and pole reversed to create the de-emphasis filter.
- // If there were no compressor kernel in between, they would cancel each other out (allpass filter).
- m_postFilter[stageIndex].setZero(r2);
- m_postFilter[stageIndex].setPole(r1);
- m_postFilterR[stageIndex].setZero(r2);
- m_postFilterR[stageIndex].setPole(r1);
+ ASSERT(m_numberOfChannels == m_preFilterPacks.size());
+
+ for (unsigned i = 0; i < m_numberOfChannels; ++i) {
+ // Set pre-filter zero and pole to create an emphasis filter.
+ ZeroPole& preFilter = m_preFilterPacks[i]->filters[stageIndex];
+ preFilter.setZero(r1);
+ preFilter.setPole(r2);
+
+ // Set post-filter with zero and pole reversed to create the de-emphasis filter.
+ // If there were no compressor kernel in between, they would cancel each other out (allpass filter).
+ ZeroPole& postFilter = m_postFilterPacks[i]->filters[stageIndex];
+ postFilter.setZero(r2);
+ postFilter.setPole(r1);
+ }
}
void DynamicsCompressor::setEmphasisParameters(float gain, float anchorFreq, float filterStageRatio)
void DynamicsCompressor::process(const AudioBus* sourceBus, AudioBus* destinationBus, unsigned framesToProcess)
{
- const float* sourceL = sourceBus->channel(0)->data();
- const float* sourceR;
+ // Though numberOfChannels is retrieved from destinationBus, we still name it numberOfChannels instead of numberOfDestinationChannels,
+ // because we internally up/down-mix the source channels to match destinationBus. numberOfChannels therefore drives the loops
+ // over both m_sourceChannels and m_destinationChannels.
+
+ unsigned numberOfChannels = destinationBus->numberOfChannels();
+ unsigned numberOfSourceChannels = sourceBus->numberOfChannels();
- if (sourceBus->numberOfChannels() > 1)
- sourceR = sourceBus->channel(1)->data();
- else
- sourceR = sourceL;
+ ASSERT(numberOfChannels == m_numberOfChannels && numberOfSourceChannels);
- ASSERT(destinationBus->numberOfChannels() == 2);
+ if (numberOfChannels != m_numberOfChannels || !numberOfSourceChannels) {
+ destinationBus->zero();
+ return;
+ }
+
+ switch (numberOfChannels) {
+ case 2: // stereo
+ m_sourceChannels[0] = sourceBus->channel(0)->data();
+
+ if (numberOfSourceChannels > 1)
+ m_sourceChannels[1] = sourceBus->channel(1)->data();
+ else
+ // Simply duplicate mono channel input data to right channel for stereo processing.
+ m_sourceChannels[1] = m_sourceChannels[0];
+
+ break;
+ default:
+ // FIXME: Support other numbers of channels.
+ ASSERT_NOT_REACHED();
+ destinationBus->zero();
+ return;
+ }
- float* destinationL = destinationBus->channel(0)->mutableData();
- float* destinationR = destinationBus->channel(1)->mutableData();
+ for (unsigned i = 0; i < numberOfChannels; ++i)
+ m_destinationChannels[i] = destinationBus->channel(i)->mutableData();
float filterStageGain = parameterValue(ParamFilterStageGain);
float filterStageRatio = parameterValue(ParamFilterStageRatio);
// Apply pre-emphasis filter.
// Note that the final three stages are computed in-place in the destination buffer.
- m_preFilter[0].process(sourceL, destinationL, framesToProcess);
- m_preFilter[1].process(destinationL, destinationL, framesToProcess);
- m_preFilter[2].process(destinationL, destinationL, framesToProcess);
- m_preFilter[3].process(destinationL, destinationL, framesToProcess);
-
- if (isStereo()) {
- m_preFilterR[0].process(sourceR, destinationR, framesToProcess);
- m_preFilterR[1].process(destinationR, destinationR, framesToProcess);
- m_preFilterR[2].process(destinationR, destinationR, framesToProcess);
- m_preFilterR[3].process(destinationR, destinationR, framesToProcess);
+ for (unsigned i = 0; i < numberOfChannels; ++i) {
+ const float* sourceData = m_sourceChannels[i];
+ float* destinationData = m_destinationChannels[i];
+ ZeroPole* preFilters = m_preFilterPacks[i]->filters;
+
+ preFilters[0].process(sourceData, destinationData, framesToProcess);
+ preFilters[1].process(destinationData, destinationData, framesToProcess);
+ preFilters[2].process(destinationData, destinationData, framesToProcess);
+ preFilters[3].process(destinationData, destinationData, framesToProcess);
}
float dbThreshold = parameterValue(ParamThreshold);
// Apply compression to the pre-filtered signal.
// The processing is performed in place.
- m_compressor.process(destinationL,
- destinationL,
- destinationR,
- destinationR,
+ m_compressor.process(m_destinationChannels.get(),
+ m_destinationChannels.get(),
+ numberOfChannels,
framesToProcess,
dbThreshold,
);
// Apply de-emphasis filter.
- m_postFilter[0].process(destinationL, destinationL, framesToProcess);
- m_postFilter[1].process(destinationL, destinationL, framesToProcess);
- m_postFilter[2].process(destinationL, destinationL, framesToProcess);
- m_postFilter[3].process(destinationL, destinationL, framesToProcess);
-
- if (isStereo()) {
- m_postFilterR[0].process(destinationR, destinationR, framesToProcess);
- m_postFilterR[1].process(destinationR, destinationR, framesToProcess);
- m_postFilterR[2].process(destinationR, destinationR, framesToProcess);
- m_postFilterR[3].process(destinationR, destinationR, framesToProcess);
+ for (unsigned i = 0; i < numberOfChannels; ++i) {
+ float* destinationData = m_destinationChannels[i];
+ ZeroPole* postFilters = m_postFilterPacks[i]->filters;
+
+ postFilters[0].process(destinationData, destinationData, framesToProcess);
+ postFilters[1].process(destinationData, destinationData, framesToProcess);
+ postFilters[2].process(destinationData, destinationData, framesToProcess);
+ postFilters[3].process(destinationData, destinationData, framesToProcess);
}
}
m_lastAnchor = -1;
m_lastFilterStageGain = -1;
- for (unsigned i = 0; i < 4; ++i) {
- m_preFilter[i].reset();
- m_preFilterR[i].reset();
- m_postFilter[i].reset();
- m_postFilterR[i].reset();
+ for (unsigned channel = 0; channel < m_numberOfChannels; ++channel) {
+ for (unsigned stageIndex = 0; stageIndex < 4; ++stageIndex) {
+ m_preFilterPacks[channel]->filters[stageIndex].reset();
+ m_postFilterPacks[channel]->filters[stageIndex].reset();
+ }
}
m_compressor.reset();
}
+void DynamicsCompressor::setNumberOfChannels(unsigned numberOfChannels)
+{
+ if (m_preFilterPacks.size() == numberOfChannels)
+ return;
+
+ m_preFilterPacks.clear();
+ m_postFilterPacks.clear();
+ for (unsigned i = 0; i < numberOfChannels; ++i) {
+ m_preFilterPacks.append(adoptPtr(new ZeroPoleFilterPack4()));
+ m_postFilterPacks.append(adoptPtr(new ZeroPoleFilterPack4()));
+ }
+
+ m_sourceChannels = adoptArrayPtr(new const float* [numberOfChannels]);
+ m_destinationChannels = adoptArrayPtr(new float* [numberOfChannels]);
+
+ m_compressor.setNumberOfChannels(numberOfChannels);
+ m_numberOfChannels = numberOfChannels;
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
#include "DynamicsCompressorKernel.h"
#include "ZeroPole.h"
+#include <wtf/OwnArrayPtr.h>
+
namespace WebCore {
class AudioBus;
ParamLast
};
- DynamicsCompressor(bool isStereo, float sampleRate);
+ DynamicsCompressor(float sampleRate, unsigned numberOfChannels);
void process(const AudioBus* sourceBus, AudioBus* destinationBus, unsigned framesToProcess);
void reset();
+ void setNumberOfChannels(unsigned);
float parameterValue(unsigned parameterID);
- bool isStereo() const { return m_isStereo; }
float sampleRate() const { return m_sampleRate; }
float nyquist() const { return m_sampleRate / 2; }
protected:
+ unsigned m_numberOfChannels;
+
// m_parameters holds the tweakable compressor parameters.
// FIXME: expose some of the most important ones (such as threshold, attack, release)
// as DynamicsCompressorNode attributes.
float m_parameters[ParamLast];
void initializeParameters();
- bool m_isStereo;
float m_sampleRate;
// Emphasis filter controls.
float m_lastAnchor;
float m_lastFilterStageGain;
- // Emphasis filters.
- ZeroPole m_preFilter[4];
- ZeroPole m_preFilterR[4];
- ZeroPole m_postFilter[4];
- ZeroPole m_postFilterR[4];
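+ // Holds the four cascaded filter stages for a single channel.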
+ typedef struct {
+ ZeroPole filters[4];
+ } ZeroPoleFilterPack4;
+
+ // Per-channel emphasis filters.
+ Vector<OwnPtr<ZeroPoleFilterPack4> > m_preFilterPacks;
+ Vector<OwnPtr<ZeroPoleFilterPack4> > m_postFilterPacks;
+
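+ // Per-channel pointer arrays, filled in by process() on every call.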
+ OwnArrayPtr<const float*> m_sourceChannels;
+ OwnArrayPtr<float*> m_destinationChannels;
void setEmphasisStageParameters(unsigned stageIndex, float gain, float normalizedFrequency /* 0 -> 1 */);
void setEmphasisParameters(float gain, float anchorFreq, float filterStageRatio);
return 1 - exp(-k * x);
}
-DynamicsCompressorKernel::DynamicsCompressorKernel(float sampleRate)
+DynamicsCompressorKernel::DynamicsCompressorKernel(float sampleRate, unsigned numberOfChannels)
: m_sampleRate(sampleRate)
, m_lastPreDelayFrames(DefaultPreDelayFrames)
- , m_preDelayBufferL(MaxPreDelayFrames)
- , m_preDelayBufferR(MaxPreDelayFrames)
, m_preDelayReadIndex(0)
, m_preDelayWriteIndex(DefaultPreDelayFrames)
{
+ setNumberOfChannels(numberOfChannels);
+
// Initializes most member variables
reset();
m_meteringReleaseK = discreteTimeConstantForSampleRate(meteringReleaseTimeConstant, sampleRate);
}
+void DynamicsCompressorKernel::setNumberOfChannels(unsigned numberOfChannels)
+{
+ if (m_preDelayBuffers.size() == numberOfChannels)
+ return;
+
+ m_preDelayBuffers.clear();
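+ // Allocate one look-ahead pre-delay buffer per channel.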
+ for (unsigned i = 0; i < numberOfChannels; ++i)
+ m_preDelayBuffers.append(adoptPtr(new AudioFloatArray(MaxPreDelayFrames)));
+}
+
void DynamicsCompressorKernel::setPreDelayTime(float preDelayTime)
{
// Re-configure look-ahead section pre-delay if delay time has changed.
if (m_lastPreDelayFrames != preDelayFrames) {
m_lastPreDelayFrames = preDelayFrames;
- m_preDelayBufferL.zero();
- m_preDelayBufferR.zero();
+ for (unsigned i = 0; i < m_preDelayBuffers.size(); ++i)
+ m_preDelayBuffers[i]->zero();
+
m_preDelayReadIndex = 0;
m_preDelayWriteIndex = preDelayFrames;
}
}
-void DynamicsCompressorKernel::process(const float* sourceL,
- float* destinationL,
- const float* sourceR, /* stereo-linked */
- float* destinationR,
+void DynamicsCompressorKernel::process(float* sourceChannels[],
+ float* destinationChannels[],
+ unsigned numberOfChannels,
unsigned framesToProcess,
float dbThreshold,
float releaseZone4
)
{
- bool isStereo = destinationR;
+ ASSERT(m_preDelayBuffers.size() == numberOfChannels);
+
float sampleRate = this->sampleRate();
float dryMix = 1 - effectBlend;
const int nDivisions = framesToProcess / nDivisionFrames;
+ unsigned frameIndex = 0;
for (int i = 0; i < nDivisions; ++i) {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Calculate desired gain
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
{
- float* delayBufferL = m_preDelayBufferL.data();
- float* delayBufferR = m_preDelayBufferR.data();
int preDelayReadIndex = m_preDelayReadIndex;
int preDelayWriteIndex = m_preDelayWriteIndex;
float detectorAverage = m_detectorAverage;
int loopFrames = nDivisionFrames;
while (loopFrames--) {
- float compressorInput;
- float inputL;
- float inputR = 0;
+ float compressorInput = 0;
// Predelay signal, computing compression amount from un-delayed version.
- if (isStereo) {
- float undelayedL = *sourceL++;
- float undelayedR = *sourceR++;
-
- compressorInput = 0.5f * (undelayedL + undelayedR);
-
- inputL = delayBufferL[preDelayReadIndex];
- inputR = delayBufferR[preDelayReadIndex];
-
- delayBufferL[preDelayWriteIndex] = undelayedL;
- delayBufferR[preDelayWriteIndex] = undelayedR;
- } else {
- compressorInput = *sourceL++;
-
- inputL = delayBufferL[preDelayReadIndex];
- delayBufferL[preDelayWriteIndex] = compressorInput;
+ for (unsigned i = 0; i < numberOfChannels; ++i) {
+ float* delayBuffer = m_preDelayBuffers[i]->data();
+ float undelayedSource = sourceChannels[i][frameIndex];
+ delayBuffer[preDelayWriteIndex] = undelayedSource;
+
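+ // Use the maximum absolute sample value across all channels as the compressor's detection input.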
+ float absUndelayedSource = undelayedSource > 0 ? undelayedSource : -undelayedSource;
+ if (compressorInput < absUndelayedSource)
+ compressorInput = absUndelayedSource;
}
- preDelayReadIndex = (preDelayReadIndex + 1) & MaxPreDelayFramesMask;
- preDelayWriteIndex = (preDelayWriteIndex + 1) & MaxPreDelayFramesMask;
-
// Calculate shaped power on undelayed input.
float scaledInput = compressorInput;
m_meteringGain += (dbRealGain - m_meteringGain) * m_meteringReleaseK;
// Apply final gain.
- if (isStereo) {
- float outputL = inputL;
- float outputR = inputR;
-
- outputL *= totalGain;
- outputR *= totalGain;
+ for (unsigned i = 0; i < numberOfChannels; ++i) {
+ float* delayBuffer = m_preDelayBuffers[i]->data();
+ destinationChannels[i][frameIndex] = delayBuffer[preDelayReadIndex] * totalGain;
+ }
- *destinationL++ = outputL;
- *destinationR++ = outputR;
- } else
- *destinationL++ = inputL * totalGain;
+ frameIndex++;
+ preDelayReadIndex = (preDelayReadIndex + 1) & MaxPreDelayFramesMask;
+ preDelayWriteIndex = (preDelayWriteIndex + 1) & MaxPreDelayFramesMask;
}
// Locals back to member variables.
m_meteringGain = 1;
// Predelay section.
- m_preDelayBufferL.zero();
- m_preDelayBufferR.zero();
+ for (unsigned i = 0; i < m_preDelayBuffers.size(); ++i)
+ m_preDelayBuffers[i]->zero();
+
m_preDelayReadIndex = 0;
m_preDelayWriteIndex = DefaultPreDelayFrames;
#include "AudioArray.h"
+#include <wtf/OwnPtr.h>
+#include <wtf/PassOwnPtr.h>
+
namespace WebCore {
class DynamicsCompressorKernel {
public:
- DynamicsCompressorKernel(float sampleRate);
+ DynamicsCompressorKernel(float sampleRate, unsigned numberOfChannels);
+
+ void setNumberOfChannels(unsigned);
- // Performs stereo-linked compression.
+ // Performs channel-linked compression: a single gain is computed from all channels and applied to every channel.
- void process(const float *sourceL,
- float *destinationL,
- const float *sourceR,
- float *destinationR,
+ void process(float* sourceChannels[],
+ float* destinationChannels[],
+ unsigned numberOfChannels,
unsigned framesToProcess,
float dbThreshold,
protected:
float m_sampleRate;
-
+
float m_detectorAverage;
float m_compressorGain;
unsigned m_lastPreDelayFrames;
void setPreDelayTime(float);
- AudioFloatArray m_preDelayBufferL;
- AudioFloatArray m_preDelayBufferR;
+ Vector<OwnPtr<AudioFloatArray> > m_preDelayBuffers;
int m_preDelayReadIndex;
int m_preDelayWriteIndex;
#include "AudioNodeOutput.h"
#include "DynamicsCompressor.h"
+// Set output to stereo by default.
+static const unsigned defaultNumberOfOutputChannels = 2;
+
namespace WebCore {
DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* context, float sampleRate)
: AudioNode(context, sampleRate)
{
addInput(adoptPtr(new AudioNodeInput(this)));
- addOutput(adoptPtr(new AudioNodeOutput(this, 2)));
+ addOutput(adoptPtr(new AudioNodeOutput(this, defaultNumberOfOutputChannels)));
setNodeType(NodeTypeDynamicsCompressor);
return;
AudioNode::initialize();
- m_dynamicsCompressor = adoptPtr(new DynamicsCompressor(true, sampleRate()));
+ m_dynamicsCompressor = adoptPtr(new DynamicsCompressor(sampleRate(), defaultNumberOfOutputChannels));
}
void DynamicsCompressorNode::uninitialize()