/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "PannerNode.h"

#if ENABLE(WEB_AUDIO)

#include "AudioBufferSourceNode.h"
#include "AudioContext.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
#include "ChannelCountMode.h"
#include "HRTFDatabaseLoader.h"
#include "HRTFPanner.h"
#include "ScriptExecutionContext.h"
#include <wtf/IsoMallocInlines.h>
#include <wtf/MathExtras.h>

namespace WebCore {

WTF_MAKE_ISO_ALLOCATED_IMPL(PannerNode);

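// Replaces NaN or infinite values with 0 so the panner is never handed an illegal azimuth or elevation.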
static void fixNANs(double& x)
{
    if (std::isnan(x) || std::isinf(x))
        x = 0.0;
}

PannerNodeBase::PannerNodeBase(BaseAudioContext& context)
    : AudioNode(context)
{
}

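// create() builds the node and then applies the PannerOptions in order (channel configuration first, then
// the distance and cone parameters); the first setter that fails propagates its exception to the caller.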
ExceptionOr<Ref<PannerNode>> PannerNode::create(BaseAudioContext& context, const PannerOptions& options)
{
    if (context.isStopped())
        return Exception { InvalidStateError };

    context.lazyInitialize();

    auto panner = adoptRef(*new PannerNode(context, options));

    auto result = panner->handleAudioNodeOptions(options, { 2, ChannelCountMode::ClampedMax, ChannelInterpretation::Speakers });
    if (result.hasException())
        return result.releaseException();

    result = panner->setMaxDistance(options.maxDistance);
    if (result.hasException())
        return result.releaseException();

    result = panner->setRefDistance(options.refDistance);
    if (result.hasException())
        return result.releaseException();

    result = panner->setRolloffFactor(options.rolloffFactor);
    if (result.hasException())
        return result.releaseException();

    result = panner->setConeOuterGain(options.coneOuterGain);
    if (result.hasException())
        return result.releaseException();

    return panner;
}

PannerNode::PannerNode(BaseAudioContext& context, const PannerOptions& options)
    : PannerNodeBase(context)
    , m_panningModel(options.panningModel)
    , m_positionX(AudioParam::create(context, "positionX"_s, options.positionX, -FLT_MAX, FLT_MAX, AutomationRate::ARate))
    , m_positionY(AudioParam::create(context, "positionY"_s, options.positionY, -FLT_MAX, FLT_MAX, AutomationRate::ARate))
    , m_positionZ(AudioParam::create(context, "positionZ"_s, options.positionZ, -FLT_MAX, FLT_MAX, AutomationRate::ARate))
    , m_orientationX(AudioParam::create(context, "orientationX"_s, options.orientationX, -FLT_MAX, FLT_MAX, AutomationRate::ARate))
    , m_orientationY(AudioParam::create(context, "orientationY"_s, options.orientationY, -FLT_MAX, FLT_MAX, AutomationRate::ARate))
    , m_orientationZ(AudioParam::create(context, "orientationZ"_s, options.orientationZ, -FLT_MAX, FLT_MAX, AutomationRate::ARate))
    // Load the HRTF database asynchronously so we don't block the JavaScript thread while creating the HRTF database.
    , m_hrtfDatabaseLoader(HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(context.sampleRate()))
{
    setNodeType(NodeTypePanner);

    setDistanceModel(options.distanceModel);
    setConeInnerAngle(options.coneInnerAngle);
    setConeOuterAngle(options.coneOuterAngle);

    initialize();
}

PannerNode::~PannerNode()
{
    uninitialize();
}

void PannerNode::pullInputs(size_t framesToProcess)
{
    // We override pullInputs() so we can detect new AudioSourceNodes which have connected to us when new connections are made.
    // These AudioSourceNodes need to be made aware of our existence in order to handle doppler shift pitch changes.
    if (m_connectionCount != context().connectionCount()) {
        m_connectionCount = context().connectionCount();

        // Recursively go through all nodes connected to us.
        HashSet<AudioNode*> visitedNodes;
        notifyAudioSourcesConnectedToNode(this, visitedNodes);
    }

    AudioNode::pullInputs(framesToProcess);
}

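// process() runs on the audio thread once per render quantum. It outputs silence whenever the node is not
// ready (uninitialized, disconnected, HRTF database still loading on a realtime context, or the panner lock
// is contended), and otherwise applies azimuth/elevation panning followed by the combined distance and cone gain.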
void PannerNode::process(size_t framesToProcess)
{
    AudioBus* destination = output(0)->bus();

    if (!isInitialized() || !input(0)->isConnected() || !m_panner.get()) {
        destination->zero();
        return;
    }

    AudioBus* source = input(0)->bus();
    if (!source) {
        destination->zero();
        return;
    }

    // The HRTF database should be loaded before proceeding for an offline audio context when panningModel() is "HRTF".
    if (panningModel() == PanningModelType::HRTF && !m_hrtfDatabaseLoader->isLoaded()) {
        if (context().isOfflineContext())
            m_hrtfDatabaseLoader->waitForLoaderThreadCompletion();
        else {
            destination->zero();
            return;
        }
    }

    // The audio thread can't block on this lock, so we use std::try_to_lock instead.
    std::unique_lock<Lock> lock(m_pannerMutex, std::try_to_lock);
    if (!lock.owns_lock()) {
        // Too bad - The try_lock() failed. We must be in the middle of changing the panner.
        destination->zero();
        return;
    }

    if ((hasSampleAccurateValues() || listener().hasSampleAccurateValues()) && (shouldUseARate() || listener().shouldUseARate())) {
        processSampleAccurateValues(destination, source, framesToProcess);
        return;
    }

    // Apply the panning effect.
    double azimuth;
    double elevation;
    azimuthElevation(&azimuth, &elevation);
    m_panner->pan(azimuth, elevation, source, destination, framesToProcess);

    // Get the distance and cone gain.
    double totalGain = distanceConeGain();

    // Apply gain in-place.
    destination->copyWithGainFrom(*destination, totalGain);
}

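// Advances the position/orientation automation timelines (and the listener's) for one render quantum without
// producing audio; each set of values is computed into the same scratch buffer and then discarded.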
void PannerNode::processOnlyAudioParams(size_t framesToProcess)
{
    float values[AudioUtilities::renderQuantumSize];
    ASSERT(framesToProcess <= AudioUtilities::renderQuantumSize);

    m_positionX->calculateSampleAccurateValues(values, framesToProcess);
    m_positionY->calculateSampleAccurateValues(values, framesToProcess);
    m_positionZ->calculateSampleAccurateValues(values, framesToProcess);

    m_orientationX->calculateSampleAccurateValues(values, framesToProcess);
    m_orientationY->calculateSampleAccurateValues(values, framesToProcess);
    m_orientationZ->calculateSampleAccurateValues(values, framesToProcess);

    listener().updateValuesIfNeeded(framesToProcess);
}

void PannerNode::processSampleAccurateValues(AudioBus* destination, const AudioBus* source, size_t framesToProcess)
{
    // Get the sample-accurate values from all of the AudioParams, including the
    // values from the AudioListener.
    float pannerX[AudioUtilities::renderQuantumSize];
    float pannerY[AudioUtilities::renderQuantumSize];
    float pannerZ[AudioUtilities::renderQuantumSize];

    float orientationX[AudioUtilities::renderQuantumSize];
    float orientationY[AudioUtilities::renderQuantumSize];
    float orientationZ[AudioUtilities::renderQuantumSize];

    m_positionX->calculateSampleAccurateValues(pannerX, framesToProcess);
    m_positionY->calculateSampleAccurateValues(pannerY, framesToProcess);
    m_positionZ->calculateSampleAccurateValues(pannerZ, framesToProcess);
    m_orientationX->calculateSampleAccurateValues(orientationX, framesToProcess);
    m_orientationY->calculateSampleAccurateValues(orientationY, framesToProcess);
    m_orientationZ->calculateSampleAccurateValues(orientationZ, framesToProcess);

    // Get the automation values from the listener.
    const float* listenerX = listener().positionXValues(AudioUtilities::renderQuantumSize);
    const float* listenerY = listener().positionYValues(AudioUtilities::renderQuantumSize);
    const float* listenerZ = listener().positionZValues(AudioUtilities::renderQuantumSize);

    const float* forwardX = listener().forwardXValues(AudioUtilities::renderQuantumSize);
    const float* forwardY = listener().forwardYValues(AudioUtilities::renderQuantumSize);
    const float* forwardZ = listener().forwardZValues(AudioUtilities::renderQuantumSize);

    const float* upX = listener().upXValues(AudioUtilities::renderQuantumSize);
    const float* upY = listener().upYValues(AudioUtilities::renderQuantumSize);
    const float* upZ = listener().upZValues(AudioUtilities::renderQuantumSize);

    // Compute the azimuth, elevation, and total gains for each position.
    double azimuth[AudioUtilities::renderQuantumSize];
    double elevation[AudioUtilities::renderQuantumSize];
    float totalGain[AudioUtilities::renderQuantumSize];

    for (size_t k = 0; k < framesToProcess; ++k) {
        FloatPoint3D pannerPosition(pannerX[k], pannerY[k], pannerZ[k]);
        FloatPoint3D orientation(orientationX[k], orientationY[k], orientationZ[k]);
        FloatPoint3D listenerPosition(listenerX[k], listenerY[k], listenerZ[k]);
        FloatPoint3D listenerFront(forwardX[k], forwardY[k], forwardZ[k]);
        FloatPoint3D listenerUp(upX[k], upY[k], upZ[k]);

        calculateAzimuthElevation(&azimuth[k], &elevation[k], pannerPosition, listenerPosition, listenerFront, listenerUp);

        // Get distance and cone gain.
        totalGain[k] = calculateDistanceConeGain(pannerPosition, orientation, listenerPosition);
    }

    m_panner->panWithSampleAccurateValues(azimuth, elevation, source, destination, framesToProcess);
    destination->copyWithSampleAccurateGainValuesFrom(*destination, totalGain, framesToProcess);
}

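// process() only takes the sample-accurate path when at least one of the six AudioParams (or one of the
// listener's) has active automation and at least one of them is set to a-rate; otherwise a single
// azimuth/elevation/gain is computed for the whole render quantum.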
bool PannerNode::hasSampleAccurateValues() const
{
    return m_positionX->hasSampleAccurateValues()
        || m_positionY->hasSampleAccurateValues()
        || m_positionZ->hasSampleAccurateValues()
        || m_orientationX->hasSampleAccurateValues()
        || m_orientationY->hasSampleAccurateValues()
        || m_orientationZ->hasSampleAccurateValues();
}

bool PannerNode::shouldUseARate() const
{
    return m_positionX->automationRate() == AutomationRate::ARate
        || m_positionY->automationRate() == AutomationRate::ARate
        || m_positionZ->automationRate() == AutomationRate::ARate
        || m_orientationX->automationRate() == AutomationRate::ARate
        || m_orientationY->automationRate() == AutomationRate::ARate
        || m_orientationZ->automationRate() == AutomationRate::ARate;
}

void PannerNode::initialize()
{
    if (isInitialized())
        return;

    m_panner = Panner::create(m_panningModel, sampleRate(), m_hrtfDatabaseLoader.get());

    AudioNode::initialize();
}

void PannerNode::uninitialize()
{
    if (!isInitialized())
        return;

    m_panner = nullptr;
    AudioNode::uninitialize();
}

AudioListener& PannerNode::listener()
{
    return context().listener();
}

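// Switching the panning model rebuilds the internal Panner (e.g. equal-power vs. HRTF). The panner mutex is
// held so the audio thread never observes a half-constructed panner; process() outputs silence for a quantum
// if it cannot acquire the lock.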
void PannerNode::setPanningModel(PanningModelType model)
{
    ASSERT(isMainThread());

    if (!m_panner.get() || model != m_panningModel) {
        // This synchronizes with process().
        auto locker = holdLock(m_pannerMutex);

        m_panner = Panner::create(model, sampleRate(), m_hrtfDatabaseLoader.get());
        m_panningModel = model;
    }
}

FloatPoint3D PannerNode::position() const
{
    return FloatPoint3D(m_positionX->value(), m_positionY->value(), m_positionZ->value());
}

ExceptionOr<void> PannerNode::setPosition(float x, float y, float z)
{
    ASSERT(isMainThread());

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    auto now = context().currentTime();

    auto result = m_positionX->setValueAtTime(x, now);
    if (result.hasException())
        return result.releaseException();
    result = m_positionY->setValueAtTime(y, now);
    if (result.hasException())
        return result.releaseException();
    result = m_positionZ->setValueAtTime(z, now);
    if (result.hasException())
        return result.releaseException();

    return { };
}

FloatPoint3D PannerNode::orientation() const
{
    return FloatPoint3D(m_orientationX->value(), m_orientationY->value(), m_orientationZ->value());
}

ExceptionOr<void> PannerNode::setOrientation(float x, float y, float z)
{
    ASSERT(isMainThread());

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    auto now = context().currentTime();

    auto result = m_orientationX->setValueAtTime(x, now);
    if (result.hasException())
        return result.releaseException();
    result = m_orientationY->setValueAtTime(y, now);
    if (result.hasException())
        return result.releaseException();
    result = m_orientationZ->setValueAtTime(z, now);
    if (result.hasException())
        return result.releaseException();

    return { };
}

DistanceModelType PannerNode::distanceModel() const
{
    return const_cast<PannerNode*>(this)->m_distanceEffect.model();
}

void PannerNode::setDistanceModel(DistanceModelType model)
{
    ASSERT(isMainThread());

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_distanceEffect.setModel(model, true);
}

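// The following setters validate their arguments as required by the Web Audio API: refDistance and
// rolloffFactor must be non-negative, maxDistance must be strictly positive, and coneOuterGain must lie in
// [0, 1]. Each update is made under the panner mutex so it synchronizes with process().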
ExceptionOr<void> PannerNode::setRefDistance(double refDistance)
{
    ASSERT(isMainThread());

    if (refDistance < 0)
        return Exception { RangeError, "refDistance cannot be set to a negative value"_s };

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_distanceEffect.setRefDistance(refDistance);
    return { };
}

ExceptionOr<void> PannerNode::setMaxDistance(double maxDistance)
{
    ASSERT(isMainThread());

    if (maxDistance <= 0)
        return Exception { RangeError, "maxDistance cannot be set to a non-positive value"_s };

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_distanceEffect.setMaxDistance(maxDistance);
    return { };
}

ExceptionOr<void> PannerNode::setRolloffFactor(double rolloffFactor)
{
    ASSERT(isMainThread());

    if (rolloffFactor < 0)
        return Exception { RangeError, "rolloffFactor cannot be set to a negative value"_s };

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_distanceEffect.setRolloffFactor(rolloffFactor);
    return { };
}

ExceptionOr<void> PannerNode::setConeOuterGain(double gain)
{
    ASSERT(isMainThread());

    if (gain < 0 || gain > 1)
        return Exception { InvalidStateError, "coneOuterGain must be in [0, 1]"_s };

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_coneEffect.setOuterGain(gain);
    return { };
}

void PannerNode::setConeOuterAngle(double angle)
{
    ASSERT(isMainThread());

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_coneEffect.setOuterAngle(angle);
}

void PannerNode::setConeInnerAngle(double angle)
{
    ASSERT(isMainThread());

    // This synchronizes with process().
    auto locker = holdLock(m_pannerMutex);

    m_coneEffect.setInnerAngle(angle);
}

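// Per the Web Audio API, a PannerNode only supports mono or stereo processing: a channelCount greater than 2
// and a channelCountMode of "max" are rejected.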
ExceptionOr<void> PannerNode::setChannelCount(unsigned channelCount)
{
    ASSERT(isMainThread());

    if (channelCount > 2)
        return Exception { NotSupportedError, "PannerNode's channelCount cannot be greater than 2"_s };

    return AudioNode::setChannelCount(channelCount);
}

ExceptionOr<void> PannerNode::setChannelCountMode(ChannelCountMode mode)
{
    ASSERT(isMainThread());

    if (mode == ChannelCountMode::Max)
        return Exception { NotSupportedError, "PannerNode's channelCountMode cannot be max"_s };

    return AudioNode::setChannelCountMode(mode);
}

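// Computes the source's azimuth and elevation, in degrees, relative to the listener. The source-listener
// vector is projected onto the plane spanned by the listener's right and front vectors to obtain the azimuth
// (measured from the listener's front, with sources behind the listener mapped to the back half-circle); the
// elevation is the angle between the source-listener vector and that plane.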
void PannerNode::calculateAzimuthElevation(double* outAzimuth, double* outElevation, const FloatPoint3D& position, const FloatPoint3D& listenerPosition, const FloatPoint3D& listenerFront, const FloatPoint3D& listenerUp)
{
    // FIXME: we should cache azimuth and elevation (if possible), so we only re-calculate if a change has been made.

    // Calculate the source-listener vector.
    FloatPoint3D sourceListener = position - listenerPosition;

    if (sourceListener.isZero()) {
        // Degenerate case if the source and listener are at the same point.
        *outAzimuth = 0;
        *outElevation = 0;
        return;
    }

    sourceListener.normalize();

    // Align axes.
    FloatPoint3D listenerRight = listenerFront.cross(listenerUp);
    listenerRight.normalize();

    FloatPoint3D listenerFrontNorm = listenerFront;
    listenerFrontNorm.normalize();

    FloatPoint3D up = listenerRight.cross(listenerFrontNorm);

    float upProjection = sourceListener.dot(up);

    FloatPoint3D projectedSource = sourceListener - upProjection * up;
    projectedSource.normalize();

    double azimuth = rad2deg(std::acos(std::clamp(projectedSource.dot(listenerRight), -1.0f, 1.0f)));
    fixNANs(azimuth); // Avoid illegal values.

    // Source in front or behind the listener.
    double frontBack = projectedSource.dot(listenerFrontNorm);
    if (frontBack < 0.0)
        azimuth = 360.0 - azimuth;

    // Make azimuth relative to the "front" and not the "right" listener vector.
    if ((azimuth >= 0.0) && (azimuth <= 270.0))
        azimuth = 90.0 - azimuth;
    else
        azimuth = 450.0 - azimuth;

    // Elevation.
    double elevation = 90.0 - 180.0 * acos(sourceListener.dot(up)) / piDouble;
    fixNANs(elevation); // Avoid illegal values.

    if (elevation > 90.0)
        elevation = 180.0 - elevation;
    else if (elevation < -90.0)
        elevation = -180.0 - elevation;

    if (outAzimuth)
        *outAzimuth = azimuth;
    if (outElevation)
        *outElevation = elevation;
}

void PannerNode::azimuthElevation(double* outAzimuth, double* outElevation)
{
    ASSERT(context().isAudioThread());

    calculateAzimuthElevation(outAzimuth, outElevation, position(), listener().position(), listener().orientation(), listener().upVector());
}

float PannerNode::dopplerRate()
{
    return 1;
}

bool PannerNode::requiresTailProcessing() const
{
    // If there's no internal panner method set up yet, assume we require tail
    // processing in case the HRTF panner is set later, which does require tail
    // processing.
    return !m_panner || m_panner->requiresTailProcessing();
}

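// The overall attenuation applied after panning is the product of the distance-model gain (based on how far
// the source is from the listener) and the cone gain (based on the source's orientation relative to the
// listener and the inner/outer cone angles).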
float PannerNode::calculateDistanceConeGain(const FloatPoint3D& sourcePosition, const FloatPoint3D& orientation, const FloatPoint3D& listenerPosition)
{
    double listenerDistance = sourcePosition.distanceTo(listenerPosition);
    double distanceGain = m_distanceEffect.gain(listenerDistance);

    // FIXME: could optimize by caching coneGain.
    double coneGain = m_coneEffect.gain(sourcePosition, orientation, listenerPosition);

    return float(distanceGain * coneGain);
}

float PannerNode::distanceConeGain()
{
    ASSERT(context().isAudioThread());

    return calculateDistanceConeGain(position(), orientation(), listener().position());
}

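// Walks backwards through the rendering graph from the given node, registering this panner with every
// AudioBufferSourceNode it finds so those sources can account for doppler shift. visitedNodes guards against
// revisiting nodes (and thus against cycles) during the recursion.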
void PannerNode::notifyAudioSourcesConnectedToNode(AudioNode* node, HashSet<AudioNode*>& visitedNodes)
{
    ASSERT(node);
    if (!node)
        return;

    // First check if this node is an AudioBufferSourceNode. If so, let it know about us so that doppler shift pitch can be taken into account.
    if (node->nodeType() == NodeTypeAudioBufferSource) {
        AudioBufferSourceNode* bufferSourceNode = reinterpret_cast<AudioBufferSourceNode*>(node);
        bufferSourceNode->setPannerNode(this);
    } else {
        // Go through all inputs to this node.
        for (unsigned i = 0; i < node->numberOfInputs(); ++i) {
            AudioNodeInput* input = node->input(i);

            // For each input, go through all of its connections, looking for AudioBufferSourceNodes.
            for (unsigned j = 0; j < input->numberOfRenderingConnections(); ++j) {
                AudioNodeOutput* connectedOutput = input->renderingOutput(j);
                AudioNode* connectedNode = connectedOutput->node();
                if (visitedNodes.contains(connectedNode))
                    continue;

                visitedNodes.add(connectedNode);
                notifyAudioSourcesConnectedToNode(connectedNode, visitedNodes);
            }
        }
    }
}

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)