/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "PannerNode.h"

#include "AudioBufferSourceNode.h"
#include "AudioBus.h"
#include "AudioContext.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
#include "ExceptionCode.h"
#include "HRTFPanner.h"
#include "ScriptExecutionContext.h"
#include <wtf/MathExtras.h>

using namespace std;

namespace WebCore {
static void fixNANs(double &x)
{
    if (isnan(x) || isinf(x))
        x = 0.0;
}
PannerNode::PannerNode(AudioContext* context, float sampleRate)
    : AudioNode(context, sampleRate)
    , m_panningModel(Panner::PanningModelHRTF)
    , m_lastGain(-1.0)
    , m_connectionCount(0)
{
    addInput(adoptPtr(new AudioNodeInput(this)));
    addOutput(adoptPtr(new AudioNodeOutput(this, 2)));

    m_distanceGain = AudioGain::create(context, "distanceGain", 1.0, 0.0, 1.0);
    m_coneGain = AudioGain::create(context, "coneGain", 1.0, 0.0, 1.0);

    m_position = FloatPoint3D(0, 0, 0);
    m_orientation = FloatPoint3D(1, 0, 0);
    m_velocity = FloatPoint3D(0, 0, 0);

    setNodeType(NodeTypePanner);

    initialize();
}
PannerNode::~PannerNode()
{
    uninitialize();
}
void PannerNode::pullInputs(size_t framesToProcess)
{
    // We override pullInputs(), so we can detect new AudioSourceNodes which have connected to us when new connections are made.
    // These AudioSourceNodes need to be made aware of our existence in order to handle doppler shift pitch changes.
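    // The rescan below runs only when the context's total connection count has changed, so the
    // recursive graph walk is skipped on render quanta where no new connections were made.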
    if (m_connectionCount != context()->connectionCount()) {
        m_connectionCount = context()->connectionCount();

        // Recursively go through all nodes connected to us.
        notifyAudioSourcesConnectedToNode(this);
    }

    AudioNode::pullInputs(framesToProcess);
}
void PannerNode::process(size_t framesToProcess)
{
    AudioBus* destination = output(0)->bus();

    if (!isInitialized() || !input(0)->isConnected() || !m_panner.get()) {
        destination->zero();
        return;
    }

    AudioBus* source = input(0)->bus();
    if (!source) {
        destination->zero();
        return;
    }

    // Apply the panning effect.
    double azimuth;
    double elevation;
    getAzimuthElevation(&azimuth, &elevation);
    m_panner->pan(azimuth, elevation, source, destination, framesToProcess);

    // Get the distance and cone gain.
    double totalGain = distanceConeGain();

    // Snap to desired gain at the beginning.
    if (m_lastGain == -1.0)
        m_lastGain = totalGain;

    // Apply gain in-place with de-zippering.
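    // De-zippering: copyWithGainFrom() ramps m_lastGain smoothly toward totalGain across the
    // render quantum instead of jumping, which avoids audible clicks when the distance or cone
    // gain changes between quanta.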
    destination->copyWithGainFrom(*destination, &m_lastGain, totalGain);
}
void PannerNode::reset()
{
    m_lastGain = -1.0; // force to snap to initial gain
    if (m_panner.get())
        m_panner->reset();
}
void PannerNode::initialize()
{
    if (isInitialized())
        return;

    m_panner = Panner::create(m_panningModel, sampleRate());

    AudioNode::initialize();
}
void PannerNode::uninitialize()
{
    if (!isInitialized())
        return;

    m_panner.clear();
    AudioNode::uninitialize();
}
AudioListener* PannerNode::listener()
{
    return context()->listener();
}
String PannerNode::panningModel() const
{
    switch (m_panningModel) {
    case EQUALPOWER:
        return "equalpower";
    case HRTF:
        return "HRTF";
    case SOUNDFIELD:
        return "soundfield";
    default:
        ASSERT_NOT_REACHED();
        return "HRTF";
    }
}
void PannerNode::setPanningModel(const String& model)
{
    if (model == "equalpower")
        setPanningModel(EQUALPOWER);
    else if (model == "HRTF")
        setPanningModel(HRTF);
    else if (model == "soundfield")
        setPanningModel(SOUNDFIELD);
    else
        ASSERT_NOT_REACHED();
}
bool PannerNode::setPanningModel(unsigned model)
{
    switch (model) {
    case EQUALPOWER:
    case HRTF:
        if (!m_panner.get() || model != m_panningModel) {
            OwnPtr<Panner> newPanner = Panner::create(model, sampleRate());
            m_panner = newPanner.release();
            m_panningModel = model;
        }
        break;
    case SOUNDFIELD:
        // FIXME: Implement sound field model. See https://bugs.webkit.org/show_bug.cgi?id=77367.
        context()->scriptExecutionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, "'soundfield' panning model not implemented.");
        break;
    default:
        return false;
    }

    return true;
}
String PannerNode::distanceModel() const
{
    switch (const_cast<PannerNode*>(this)->m_distanceEffect.model()) {
    case DistanceEffect::ModelLinear:
        return "linear";
    case DistanceEffect::ModelInverse:
        return "inverse";
    case DistanceEffect::ModelExponential:
        return "exponential";
    default:
        ASSERT_NOT_REACHED();
        return "inverse";
    }
}
void PannerNode::setDistanceModel(const String& model)
{
    if (model == "linear")
        setDistanceModel(DistanceEffect::ModelLinear);
    else if (model == "inverse")
        setDistanceModel(DistanceEffect::ModelInverse);
    else if (model == "exponential")
        setDistanceModel(DistanceEffect::ModelExponential);
    else
        ASSERT_NOT_REACHED();
}
bool PannerNode::setDistanceModel(unsigned model)
{
    switch (model) {
    case DistanceEffect::ModelLinear:
    case DistanceEffect::ModelInverse:
    case DistanceEffect::ModelExponential:
        m_distanceEffect.setModel(static_cast<DistanceEffect::ModelType>(model), true);
        break;
    default:
        return false;
    }

    return true;
}
void PannerNode::getAzimuthElevation(double* outAzimuth, double* outElevation)
{
    // FIXME: we should cache azimuth and elevation (if possible), so we only re-calculate if a change has been made.

    double azimuth = 0.0;

    // Calculate the source-listener vector
    FloatPoint3D listenerPosition = listener()->position();
    FloatPoint3D sourceListener = m_position - listenerPosition;

    if (sourceListener.isZero()) {
        // degenerate case if source and listener are at the same point
        *outAzimuth = 0.0;
        *outElevation = 0.0;
        return;
    }

    sourceListener.normalize();

    // Align axes
    FloatPoint3D listenerFront = listener()->orientation();
    FloatPoint3D listenerUp = listener()->upVector();
    FloatPoint3D listenerRight = listenerFront.cross(listenerUp);
    listenerRight.normalize();

    FloatPoint3D listenerFrontNorm = listenerFront;
    listenerFrontNorm.normalize();

    FloatPoint3D up = listenerRight.cross(listenerFrontNorm);
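    // listenerRight, listenerFrontNorm and up describe the listener's local coordinate frame
    // (an orthonormal basis when the listener's front and up vectors are perpendicular, as they
    // are by default); azimuth and elevation are measured within this frame.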
    float upProjection = sourceListener.dot(up);

    FloatPoint3D projectedSource = sourceListener - upProjection * up;
    projectedSource.normalize();
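    // The azimuth is the angle between the source direction projected onto the listener's
    // horizontal plane and the listener's right vector: acos() of the dot product of two unit
    // vectors gives that angle in radians, converted to degrees here.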
    azimuth = 180.0 * acos(projectedSource.dot(listenerRight)) / piDouble;
    fixNANs(azimuth); // avoid illegal values

    // Source in front or behind the listener
    double frontBack = projectedSource.dot(listenerFrontNorm);
    if (frontBack < 0.0)
        azimuth = 360.0 - azimuth;

    // Make azimuth relative to "front" and not "right" listener vector
    if ((azimuth >= 0.0) && (azimuth <= 270.0))
        azimuth = 90.0 - azimuth;
    else
        azimuth = 450.0 - azimuth;

    // Elevation
    double elevation = 90.0 - 180.0 * acos(sourceListener.dot(up)) / piDouble;
    fixNANs(elevation); // avoid illegal values

    if (elevation > 90.0)
        elevation = 180.0 - elevation;
    else if (elevation < -90.0)
        elevation = -180.0 - elevation;
    if (outAzimuth)
        *outAzimuth = azimuth;
    if (outElevation)
        *outElevation = elevation;
}
float PannerNode::dopplerRate()
{
    double dopplerShift = 1.0;

    // FIXME: optimize for case when neither source nor listener has changed...
    double dopplerFactor = listener()->dopplerFactor();

    if (dopplerFactor > 0.0) {
        double speedOfSound = listener()->speedOfSound();

        const FloatPoint3D& sourceVelocity = m_velocity;
        const FloatPoint3D& listenerVelocity = listener()->velocity();

        // Don't bother if both source and listener have no velocity
        bool sourceHasVelocity = !sourceVelocity.isZero();
        bool listenerHasVelocity = !listenerVelocity.isZero();

        if (sourceHasVelocity || listenerHasVelocity) {
            // Calculate the source to listener vector
            FloatPoint3D listenerPosition = listener()->position();
            FloatPoint3D sourceToListener = m_position - listenerPosition;

            double sourceListenerMagnitude = sourceToListener.length();
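            // Only motion along the source-listener axis contributes to the Doppler shift, so
            // project each velocity onto that axis (the division by the magnitude normalizes the
            // axis vector).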
            double listenerProjection = sourceToListener.dot(listenerVelocity) / sourceListenerMagnitude;
            double sourceProjection = sourceToListener.dot(sourceVelocity) / sourceListenerMagnitude;

            listenerProjection = -listenerProjection;
            sourceProjection = -sourceProjection;

            double scaledSpeedOfSound = speedOfSound / dopplerFactor;
            listenerProjection = min(listenerProjection, scaledSpeedOfSound);
            sourceProjection = min(sourceProjection, scaledSpeedOfSound);
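            // Standard Doppler equation with the velocity projections scaled by the listener's
            // dopplerFactor. The clamps above keep the denominator from going negative, and
            // fixNANs() handles the degenerate division-by-zero case.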
            dopplerShift = ((speedOfSound - dopplerFactor * listenerProjection) / (speedOfSound - dopplerFactor * sourceProjection));
            fixNANs(dopplerShift); // avoid illegal values

            // Limit the pitch shifting to 4 octaves up and 3 octaves down.
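            // 16 = 2^4 (four octaves up); 0.125 = 2^-3 (three octaves down).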
            if (dopplerShift > 16.0)
                dopplerShift = 16.0;
            else if (dopplerShift < 0.125)
                dopplerShift = 0.125;
        }
    }

    return static_cast<float>(dopplerShift);
}
float PannerNode::distanceConeGain()
{
    FloatPoint3D listenerPosition = listener()->position();

    double listenerDistance = m_position.distanceTo(listenerPosition);
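    // m_distanceEffect attenuates according to the currently selected distance model (linear,
    // inverse, or exponential); m_coneEffect below attenuates based on where the listener sits
    // relative to the source's orientation cone.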
    double distanceGain = m_distanceEffect.gain(listenerDistance);

    m_distanceGain->setValue(static_cast<float>(distanceGain));

    // FIXME: could optimize by caching coneGain
    double coneGain = m_coneEffect.gain(m_position, m_orientation, listenerPosition);

    m_coneGain->setValue(static_cast<float>(coneGain));

    return float(distanceGain * coneGain);
}
void PannerNode::notifyAudioSourcesConnectedToNode(AudioNode* node)
{
    ASSERT(node);
    if (!node)
        return;

    // First check if this node is an AudioBufferSourceNode. If so, let it know about us so that doppler shift pitch can be taken into account.
    if (node->nodeType() == NodeTypeAudioBufferSource) {
        AudioBufferSourceNode* bufferSourceNode = reinterpret_cast<AudioBufferSourceNode*>(node);
        bufferSourceNode->setPannerNode(this);
    } else {
        // Go through all inputs to this node.
        for (unsigned i = 0; i < node->numberOfInputs(); ++i) {
            AudioNodeInput* input = node->input(i);

            // For each input, go through all of its connections, looking for AudioBufferSourceNodes.
            for (unsigned j = 0; j < input->numberOfRenderingConnections(); ++j) {
                AudioNodeOutput* connectedOutput = input->renderingOutput(j);
                AudioNode* connectedNode = connectedOutput->node();
                notifyAudioSourcesConnectedToNode(connectedNode); // recurse
            }
        }
    }
}
} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)