/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "PannerNode.h"

#if ENABLE(WEB_AUDIO)

#include "AudioBufferSourceNode.h"
#include "AudioContext.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
#include "AudioUtilities.h"
#include "ChannelCountMode.h"
#include "HRTFDatabaseLoader.h"
#include "HRTFPanner.h"
#include "ScriptExecutionContext.h"
#include <wtf/IsoMallocInlines.h>
#include <wtf/MathExtras.h>

namespace WebCore {

WTF_MAKE_ISO_ALLOCATED_IMPL(PannerNode);

static void fixNANs(double& x)
{
    if (std::isnan(x) || std::isinf(x))
        x = 0.0;
}

PannerNodeBase::PannerNodeBase(BaseAudioContext& context)
    : AudioNode(context)
{
}

ExceptionOr<Ref<PannerNode>> PannerNode::create(BaseAudioContext& context, const PannerOptions& options)
{
    if (context.isStopped())
        return Exception { InvalidStateError };

    context.lazyInitialize();

    auto panner = adoptRef(*new PannerNode(context, options));

    auto result = panner->handleAudioNodeOptions(options, { 2, ChannelCountMode::ClampedMax, ChannelInterpretation::Speakers });
    if (result.hasException())
        return result.releaseException();

    result = panner->setMaxDistance(options.maxDistance);
    if (result.hasException())
        return result.releaseException();

    result = panner->setRefDistance(options.refDistance);
    if (result.hasException())
        return result.releaseException();

    result = panner->setRolloffFactor(options.rolloffFactor);
    if (result.hasException())
        return result.releaseException();

    result = panner->setConeOuterGain(options.coneOuterGain);
    if (result.hasException())
        return result.releaseException();

    return panner;
}

PannerNode::PannerNode(BaseAudioContext& context, const PannerOptions& options)
    : PannerNodeBase(context)
    , m_panningModel(options.panningModel)
    , m_positionX(AudioParam::create(context, "positionX"_s, options.positionX, -FLT_MAX, FLT_MAX, AutomationRate::ARate))
    , m_positionY(AudioParam::create(context, "positionY"_s, options.positionY, -FLT_MAX, FLT_MAX, AutomationRate::ARate))
    , m_positionZ(AudioParam::create(context, "positionZ"_s, options.positionZ, -FLT_MAX, FLT_MAX, AutomationRate::ARate))
    , m_orientationX(AudioParam::create(context, "orientationX"_s, options.orientationX, -FLT_MAX, FLT_MAX, AutomationRate::ARate))
    , m_orientationY(AudioParam::create(context, "orientationY"_s, options.orientationY, -FLT_MAX, FLT_MAX, AutomationRate::ARate))
    , m_orientationZ(AudioParam::create(context, "orientationZ"_s, options.orientationZ, -FLT_MAX, FLT_MAX, AutomationRate::ARate))
    // Load the HRTF database asynchronously so we don't block the JavaScript thread while creating the HRTF database.
    , m_hrtfDatabaseLoader(HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(context.sampleRate()))
{
    setNodeType(NodeTypePanner);

    setDistanceModel(options.distanceModel);
    setConeInnerAngle(options.coneInnerAngle);
    setConeOuterAngle(options.coneOuterAngle);
}

PannerNode::~PannerNode()
{
    uninitialize();
}

void PannerNode::pullInputs(size_t framesToProcess)
{
    // We override pullInputs() so we can detect new AudioSourceNodes which have connected to us when new connections are made.
    // These AudioSourceNodes need to be made aware of our existence in order to handle doppler shift pitch changes.
    if (m_connectionCount != context().connectionCount()) {
        m_connectionCount = context().connectionCount();

        // Recursively go through all nodes connected to us.
        HashSet<AudioNode*> visitedNodes;
        notifyAudioSourcesConnectedToNode(this, visitedNodes);
    }

    AudioNode::pullInputs(framesToProcess);
}

void PannerNode::process(size_t framesToProcess)
{
    AudioBus* destination = output(0)->bus();

    if (!isInitialized() || !input(0)->isConnected() || !m_panner.get()) {
        destination->zero();
        return;
    }

    AudioBus* source = input(0)->bus();
    if (!source) {
        destination->zero();
        return;
    }

    // For an offline audio context, the HRTF database must be loaded before proceeding when panningModel() is "HRTF".
    if (panningModel() == PanningModelType::HRTF && !m_hrtfDatabaseLoader->isLoaded()) {
        if (context().isOfflineContext())
            m_hrtfDatabaseLoader->waitForLoaderThreadCompletion();
        else {
            // The HRTF panner cannot pan without its database, so output silence until it has loaded.
            destination->zero();
            return;
        }
    }

    // The audio thread can't block on this lock, so we use std::try_to_lock instead.
    std::unique_lock<Lock> lock(m_pannerMutex, std::try_to_lock);
    if (!lock.owns_lock()) {
        // Too bad - The try_lock() failed. We must be in the middle of changing the panner.
        destination->zero();
        return;
    }

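    // Take the per-frame path only when at least one relevant AudioParam (on this node or on the
    // listener) both has scheduled automation and is set to a-rate; otherwise a single
    // azimuth/elevation and gain per render quantum is enough.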
    if ((hasSampleAccurateValues() || listener().hasSampleAccurateValues()) && (shouldUseARate() || listener().shouldUseARate())) {
        processSampleAccurateValues(destination, source, framesToProcess);
        return;
    }

    // Apply the panning effect.
    double azimuth;
    double elevation;
    azimuthElevation(&azimuth, &elevation);
    m_panner->pan(azimuth, elevation, source, destination, framesToProcess);

    // Get the distance and cone gain.
    double totalGain = distanceConeGain();

    // Apply gain in-place.
    destination->copyWithGainFrom(*destination, totalGain);
}

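// Keeps the AudioParam automation timelines moving for render quanta where this node does not need
// to produce audio; the computed values all land in one scratch buffer and are discarded.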
void PannerNode::processOnlyAudioParams(size_t framesToProcess)
{
    float values[AudioUtilities::renderQuantumSize];
    ASSERT(framesToProcess <= AudioUtilities::renderQuantumSize);

    m_positionX->calculateSampleAccurateValues(values, framesToProcess);
    m_positionY->calculateSampleAccurateValues(values, framesToProcess);
    m_positionZ->calculateSampleAccurateValues(values, framesToProcess);

    m_orientationX->calculateSampleAccurateValues(values, framesToProcess);
    m_orientationY->calculateSampleAccurateValues(values, framesToProcess);
    m_orientationZ->calculateSampleAccurateValues(values, framesToProcess);

    listener().updateValuesIfNeeded(framesToProcess);
}

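// Per-frame variant of process(): evaluates the panner and listener AudioParams for every frame,
// derives azimuth/elevation and the distance/cone gain per frame, then pans and applies the gains.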
void PannerNode::processSampleAccurateValues(AudioBus* destination, const AudioBus* source, size_t framesToProcess)
{
    // Get the sample accurate values from all of the AudioParams, including the
    // values from the AudioListener.
    float pannerX[AudioUtilities::renderQuantumSize];
    float pannerY[AudioUtilities::renderQuantumSize];
    float pannerZ[AudioUtilities::renderQuantumSize];

    float orientationX[AudioUtilities::renderQuantumSize];
    float orientationY[AudioUtilities::renderQuantumSize];
    float orientationZ[AudioUtilities::renderQuantumSize];

    m_positionX->calculateSampleAccurateValues(pannerX, framesToProcess);
    m_positionY->calculateSampleAccurateValues(pannerY, framesToProcess);
    m_positionZ->calculateSampleAccurateValues(pannerZ, framesToProcess);
    m_orientationX->calculateSampleAccurateValues(orientationX, framesToProcess);
    m_orientationY->calculateSampleAccurateValues(orientationY, framesToProcess);
    m_orientationZ->calculateSampleAccurateValues(orientationZ, framesToProcess);

    // Get the automation values from the listener.
    const float* listenerX = listener().positionXValues(AudioUtilities::renderQuantumSize);
    const float* listenerY = listener().positionYValues(AudioUtilities::renderQuantumSize);
    const float* listenerZ = listener().positionZValues(AudioUtilities::renderQuantumSize);

    const float* forwardX = listener().forwardXValues(AudioUtilities::renderQuantumSize);
    const float* forwardY = listener().forwardYValues(AudioUtilities::renderQuantumSize);
    const float* forwardZ = listener().forwardZValues(AudioUtilities::renderQuantumSize);

    const float* upX = listener().upXValues(AudioUtilities::renderQuantumSize);
    const float* upY = listener().upYValues(AudioUtilities::renderQuantumSize);
    const float* upZ = listener().upZValues(AudioUtilities::renderQuantumSize);

    // Compute the azimuth, elevation, and total gains for each position.
    double azimuth[AudioUtilities::renderQuantumSize];
    double elevation[AudioUtilities::renderQuantumSize];
    float totalGain[AudioUtilities::renderQuantumSize];

    for (size_t k = 0; k < framesToProcess; ++k) {
        FloatPoint3D pannerPosition(pannerX[k], pannerY[k], pannerZ[k]);
        FloatPoint3D orientation(orientationX[k], orientationY[k], orientationZ[k]);
        FloatPoint3D listenerPosition(listenerX[k], listenerY[k], listenerZ[k]);
        FloatPoint3D listenerFront(forwardX[k], forwardY[k], forwardZ[k]);
        FloatPoint3D listenerUp(upX[k], upY[k], upZ[k]);

        calculateAzimuthElevation(&azimuth[k], &elevation[k], pannerPosition, listenerPosition, listenerFront, listenerUp);

        // Get distance and cone gain.
        totalGain[k] = calculateDistanceConeGain(pannerPosition, orientation, listenerPosition);
    }

    m_panner->panWithSampleAccurateValues(azimuth, elevation, source, destination, framesToProcess);
    destination->copyWithSampleAccurateGainValuesFrom(*destination, totalGain, framesToProcess);
}

bool PannerNode::hasSampleAccurateValues() const
{
    return m_positionX->hasSampleAccurateValues()
        || m_positionY->hasSampleAccurateValues()
        || m_positionZ->hasSampleAccurateValues()
        || m_orientationX->hasSampleAccurateValues()
        || m_orientationY->hasSampleAccurateValues()
        || m_orientationZ->hasSampleAccurateValues();
}

bool PannerNode::shouldUseARate() const
{
    return m_positionX->automationRate() == AutomationRate::ARate
        || m_positionY->automationRate() == AutomationRate::ARate
        || m_positionZ->automationRate() == AutomationRate::ARate
        || m_orientationX->automationRate() == AutomationRate::ARate
        || m_orientationY->automationRate() == AutomationRate::ARate
        || m_orientationZ->automationRate() == AutomationRate::ARate;
}

void PannerNode::initialize()
{
    if (isInitialized())
        return;

    m_panner = Panner::create(m_panningModel, sampleRate(), m_hrtfDatabaseLoader.get());

    AudioNode::initialize();
}

void PannerNode::uninitialize()
{
    if (!isInitialized())
        return;

    m_panner = nullptr;
    AudioNode::uninitialize();
}

AudioListener& PannerNode::listener()
{
    return context().listener();
}

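// Changing the panning model re-creates the internal Panner under the panner lock. The "HRTF"
// model uses the asynchronously loaded HRTF database; "equalpower" performs simple equal-power
// stereo panning.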
void PannerNode::setPanningModel(PanningModelType model)
{
    ASSERT(isMainThread());

    if (!m_panner.get() || model != m_panningModel) {
        // This synchronizes with process().
        auto locker = holdLock(m_pannerMutex);

        m_panner = Panner::create(model, sampleRate(), m_hrtfDatabaseLoader.get());
        m_panningModel = model;
    }
}

FloatPoint3D PannerNode::position() const
{
    return FloatPoint3D(m_positionX->value(), m_positionY->value(), m_positionZ->value());
}

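// The legacy setPosition()/setOrientation() helpers are implemented by scheduling setValueAtTime()
// on the underlying AudioParams at the context's current time, so they can propagate exceptions
// from the automation timeline.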
ExceptionOr<void> PannerNode::setPosition(float x, float y, float z)
{
    ASSERT(isMainThread());

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    auto now = context().currentTime();

    auto result = m_positionX->setValueAtTime(x, now);
    if (result.hasException())
        return result.releaseException();
    result = m_positionY->setValueAtTime(y, now);
    if (result.hasException())
        return result.releaseException();
    result = m_positionZ->setValueAtTime(z, now);
    if (result.hasException())
        return result.releaseException();

    return { };
}

FloatPoint3D PannerNode::orientation() const
{
    return FloatPoint3D(m_orientationX->value(), m_orientationY->value(), m_orientationZ->value());
}

ExceptionOr<void> PannerNode::setOrientation(float x, float y, float z)
{
    ASSERT(isMainThread());

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    auto now = context().currentTime();

    auto result = m_orientationX->setValueAtTime(x, now);
    if (result.hasException())
        return result.releaseException();
    result = m_orientationY->setValueAtTime(y, now);
    if (result.hasException())
        return result.releaseException();
    result = m_orientationZ->setValueAtTime(z, now);
    if (result.hasException())
        return result.releaseException();

    return { };
}

DistanceModelType PannerNode::distanceModel() const
{
    return const_cast<PannerNode*>(this)->m_distanceEffect.model();
}

void PannerNode::setDistanceModel(DistanceModelType model)
{
    ASSERT(isMainThread());

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_distanceEffect.setModel(model, true);
}

ExceptionOr<void> PannerNode::setRefDistance(double refDistance)
{
    ASSERT(isMainThread());

    if (refDistance < 0)
        return Exception { RangeError, "refDistance cannot be set to a negative value"_s };

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_distanceEffect.setRefDistance(refDistance);
    return { };
}

ExceptionOr<void> PannerNode::setMaxDistance(double maxDistance)
{
    ASSERT(isMainThread());

    if (maxDistance <= 0)
        return Exception { RangeError, "maxDistance cannot be set to a non-positive value"_s };

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_distanceEffect.setMaxDistance(maxDistance);
    return { };
}

ExceptionOr<void> PannerNode::setRolloffFactor(double rolloffFactor)
{
    ASSERT(isMainThread());

    if (rolloffFactor < 0)
        return Exception { RangeError, "rolloffFactor cannot be set to a negative value"_s };

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_distanceEffect.setRolloffFactor(rolloffFactor);
    return { };
}

ExceptionOr<void> PannerNode::setConeOuterGain(double gain)
{
    ASSERT(isMainThread());

    if (gain < 0 || gain > 1)
        return Exception { InvalidStateError, "coneOuterGain must be in [0, 1]"_s };

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_coneEffect.setOuterGain(gain);
    return { };
}

void PannerNode::setConeOuterAngle(double angle)
{
    ASSERT(isMainThread());

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_coneEffect.setOuterAngle(angle);
}

void PannerNode::setConeInnerAngle(double angle)
{
    ASSERT(isMainThread());

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_coneEffect.setInnerAngle(angle);
}

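// The Web Audio spec restricts a PannerNode to at most two channels and disallows the "max"
// channelCountMode, since the panning algorithms produce stereo output.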
ExceptionOr<void> PannerNode::setChannelCount(unsigned channelCount)
{
    ASSERT(isMainThread());

    if (channelCount > 2)
        return Exception { NotSupportedError, "PannerNode's channelCount cannot be greater than 2"_s };

    return AudioNode::setChannelCount(channelCount);
}

ExceptionOr<void> PannerNode::setChannelCountMode(ChannelCountMode mode)
{
    ASSERT(isMainThread());

    if (mode == ChannelCountMode::Max)
        return Exception { NotSupportedError, "PannerNode's channelCountMode cannot be max"_s };

    return AudioNode::setChannelCountMode(mode);
}

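// Computes the source's azimuth and elevation in the listener's frame: build an orthonormal
// listener basis (right = front x up, up' = right x front), project the normalized
// source-listener vector onto the horizontal plane for azimuth, and use its angle from up' for
// elevation.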
void PannerNode::calculateAzimuthElevation(double* outAzimuth, double* outElevation, const FloatPoint3D& position, const FloatPoint3D& listenerPosition, const FloatPoint3D& listenerFront, const FloatPoint3D& listenerUp)
{
    // FIXME: we should cache azimuth and elevation (if possible), so we only re-calculate if a change has been made.

    // Calculate the source-listener vector.
    FloatPoint3D sourceListener = position - listenerPosition;

    if (sourceListener.isZero()) {
        // Degenerate case if the source and listener are at the same point.
        *outAzimuth = 0;
        *outElevation = 0;
        return;
    }

    sourceListener.normalize();

    // Align axes.
    FloatPoint3D listenerRight = listenerFront.cross(listenerUp);
    listenerRight.normalize();

    FloatPoint3D listenerFrontNorm = listenerFront;
    listenerFrontNorm.normalize();

    FloatPoint3D up = listenerRight.cross(listenerFrontNorm);

    float upProjection = sourceListener.dot(up);

    FloatPoint3D projectedSource = sourceListener - upProjection * up;
    projectedSource.normalize();

    double azimuth = rad2deg(std::acos(std::clamp(projectedSource.dot(listenerRight), -1.0f, 1.0f)));
    fixNANs(azimuth); // avoid illegal values

    // Source in front or behind the listener.
    double frontBack = projectedSource.dot(listenerFrontNorm);
    if (frontBack < 0.0)
        azimuth = 360.0 - azimuth;

    // Make azimuth relative to "front" and not "right" listener vector.
    if ((azimuth >= 0.0) && (azimuth <= 270.0))
        azimuth = 90.0 - azimuth;
    else
        azimuth = 450.0 - azimuth;

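    // For example, a source straight ahead projects to 0 on the "right" axis (acos gives 90°) and
    // is in front, so azimuth = 90 - 90 = 0; directly to the right gives 90 - 0 = 90; to the left
    // gives 90 - 180 = -90; directly behind maps to -180.
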
    // Elevation: angle of the source above (positive) or below (negative) the listener's horizontal plane.
    double elevation = 90.0 - 180.0 * acos(sourceListener.dot(up)) / piDouble;
    fixNANs(elevation); // avoid illegal values

    if (elevation > 90.0)
        elevation = 180.0 - elevation;
    else if (elevation < -90.0)
        elevation = -180.0 - elevation;

    if (outAzimuth)
        *outAzimuth = azimuth;
    if (outElevation)
        *outElevation = elevation;
}

void PannerNode::azimuthElevation(double* outAzimuth, double* outElevation)
{
    ASSERT(context().isAudioThread());

    calculateAzimuthElevation(outAzimuth, outElevation, position(), listener().position(), listener().orientation(), listener().upVector());
}

float PannerNode::dopplerRate()
{
    // Doppler shift is no longer applied by PannerNode, so report a rate of 1 (no pitch change).
    return 1;
}

bool PannerNode::requiresTailProcessing() const
{
    // If there's no internal panner method set up yet, assume we require tail
    // processing in case the HRTF panner is set later, which does require tail
    // processing.
    return !m_panner || m_panner->requiresTailProcessing();
}

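// The post-panning gain is the product of the distance model's gain (linear, inverse, or
// exponential falloff based on refDistance/maxDistance/rolloffFactor) and the cone gain derived
// from the source's orientation relative to the listener (full gain inside coneInnerAngle,
// coneOuterGain outside coneOuterAngle, interpolated in between).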
float PannerNode::calculateDistanceConeGain(const FloatPoint3D& sourcePosition, const FloatPoint3D& orientation, const FloatPoint3D& listenerPosition)
{
    double listenerDistance = sourcePosition.distanceTo(listenerPosition);
    double distanceGain = m_distanceEffect.gain(listenerDistance);

    // FIXME: could optimize by caching coneGain.
    double coneGain = m_coneEffect.gain(sourcePosition, orientation, listenerPosition);

    return float(distanceGain * coneGain);
}

float PannerNode::distanceConeGain()
{
    ASSERT(context().isAudioThread());

    return calculateDistanceConeGain(position(), orientation(), listener().position());
}

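// Walks the rendering graph upstream from this panner so that every connected
// AudioBufferSourceNode learns about it; the visitedNodes set prevents revisiting nodes in graphs
// with fan-out or cycles.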
void PannerNode::notifyAudioSourcesConnectedToNode(AudioNode* node, HashSet<AudioNode*>& visitedNodes)
{
    ASSERT(node);
    if (!node)
        return;

    // First check if this node is an AudioBufferSourceNode. If so, let it know about us so that doppler shift pitch can be taken into account.
    if (node->nodeType() == NodeTypeAudioBufferSource) {
        AudioBufferSourceNode* bufferSourceNode = reinterpret_cast<AudioBufferSourceNode*>(node);
        bufferSourceNode->setPannerNode(this);
    } else {
        // Go through all inputs to this node.
        for (unsigned i = 0; i < node->numberOfInputs(); ++i) {
            AudioNodeInput* input = node->input(i);

            // For each input, go through all of its connections, looking for AudioBufferSourceNodes.
            for (unsigned j = 0; j < input->numberOfRenderingConnections(); ++j) {
                AudioNodeOutput* connectedOutput = input->renderingOutput(j);
                AudioNode* connectedNode = connectedOutput->node();
                if (visitedNodes.contains(connectedNode))
                    continue;

                visitedNodes.add(connectedNode);
                notifyAudioSourcesConnectedToNode(connectedNode, visitedNodes);
            }
        }
    }
}

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)